Diffstat (limited to 'drivers'): 960 files changed, 53527 insertions(+), 26633 deletions(-)
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 010f5f579e68..f3c643a0473c 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -197,6 +197,7 @@ config BT_HCIUART_BCM config BT_HCIUART_QCA bool "Qualcomm Atheros protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV select BT_HCIUART_H4 select BT_QCA help diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 6659f113042c..99cde1f9467d 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -315,10 +315,12 @@ static int btbcm_read_info(struct hci_dev *hdev) return 0; } -static const struct { +struct bcm_subver_table { u16 subver; const char *name; -} bcm_uart_subver_table[] = { +}; + +static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ @@ -330,12 +332,28 @@ static const struct { { } }; -int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len) +static const struct bcm_subver_table bcm_usb_subver_table[] = { + { 0x210b, "BCM43142A0" }, /* 001.001.011 */ + { 0x2112, "BCM4314A0" }, /* 001.001.018 */ + { 0x2118, "BCM20702A0" }, /* 001.001.024 */ + { 0x2126, "BCM4335A0" }, /* 001.001.038 */ + { 0x220e, "BCM20702A1" }, /* 001.002.014 */ + { 0x230f, "BCM4354A2" }, /* 001.003.015 */ + { 0x4106, "BCM4335B0" }, /* 002.001.006 */ + { 0x410e, "BCM20702B0" }, /* 002.001.014 */ + { 0x6109, "BCM4335C0" }, /* 003.001.009 */ + { 0x610c, "BCM4354" }, /* 003.001.012 */ + { } +}; + +int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len, + bool reinit) { - u16 subver, rev; - const char *hw_name = NULL; + u16 subver, rev, pid, vid; + const char *hw_name = "BCM"; struct sk_buff *skb; struct hci_rp_read_local_version *ver; + const struct bcm_subver_table *bcm_subver_table; int i, err; /* Reset */ @@ -354,30 +372,44 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len) kfree_skb(skb); /* Read controller information */ - err = btbcm_read_info(hdev); - if (err) - return err; + if (!reinit) { + err = btbcm_read_info(hdev); + if (err) + return err; + } - switch ((rev & 0xf000) >> 12) { - case 0: - case 1: - case 2: - case 3: - for (i = 0; bcm_uart_subver_table[i].name; i++) { - if (subver == bcm_uart_subver_table[i].subver) { - hw_name = bcm_uart_subver_table[i].name; - break; - } + /* Upper nibble of rev should be between 0 and 3? */ + if (((rev & 0xf000) >> 12) > 3) + return 0; + + bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table : + bcm_uart_subver_table; + + for (i = 0; bcm_subver_table[i].name; i++) { + if (subver == bcm_subver_table[i].subver) { + hw_name = bcm_subver_table[i].name; + break; } + } - snprintf(fw_name, len, "brcm/%s.hcd", hw_name ? : "BCM"); - break; - default: - return 0; + if (hdev->bus == HCI_USB) { + /* Read USB Product Info */ + skb = btbcm_read_usb_product(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + vid = get_unaligned_le16(skb->data + 1); + pid = get_unaligned_le16(skb->data + 3); + kfree_skb(skb); + + snprintf(fw_name, len, "brcm/%s-%4.4x-%4.4x.hcd", + hw_name, vid, pid); + } else { + snprintf(fw_name, len, "brcm/%s.hcd", hw_name); } bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u", - hw_name ? 
: "BCM", (subver & 0xe000) >> 13, + hw_name, (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); return 0; @@ -386,30 +418,14 @@ EXPORT_SYMBOL_GPL(btbcm_initialize); int btbcm_finalize(struct hci_dev *hdev) { - struct sk_buff *skb; - struct hci_rp_read_local_version *ver; - u16 subver, rev; + char fw_name[64]; int err; - /* Reset */ - err = btbcm_reset(hdev); + /* Re-initialize */ + err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true); if (err) return err; - /* Read Local Version Info */ - skb = btbcm_read_local_version(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - ver = (struct hci_rp_read_local_version *)skb->data; - rev = le16_to_cpu(ver->hci_rev); - subver = le16_to_cpu(ver->lmp_subver); - kfree_skb(skb); - - bt_dev_info(hdev, "BCM (%3.3u.%3.3u.%3.3u) build %4.4u", - (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, - (subver & 0x00ff), rev & 0x0fff); - btbcm_check_bdaddr(hdev); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); @@ -418,95 +434,18 @@ int btbcm_finalize(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(btbcm_finalize); -static const struct { - u16 subver; - const char *name; -} bcm_usb_subver_table[] = { - { 0x210b, "BCM43142A0" }, /* 001.001.011 */ - { 0x2112, "BCM4314A0" }, /* 001.001.018 */ - { 0x2118, "BCM20702A0" }, /* 001.001.024 */ - { 0x2126, "BCM4335A0" }, /* 001.001.038 */ - { 0x220e, "BCM20702A1" }, /* 001.002.014 */ - { 0x230f, "BCM4354A2" }, /* 001.003.015 */ - { 0x4106, "BCM4335B0" }, /* 002.001.006 */ - { 0x410e, "BCM20702B0" }, /* 002.001.014 */ - { 0x6109, "BCM4335C0" }, /* 003.001.009 */ - { 0x610c, "BCM4354" }, /* 003.001.012 */ - { } -}; - int btbcm_setup_patchram(struct hci_dev *hdev) { char fw_name[64]; const struct firmware *fw; - u16 subver, rev, pid, vid; - const char *hw_name = NULL; struct sk_buff *skb; - struct hci_rp_read_local_version *ver; - int i, err; - - /* Reset */ - err = btbcm_reset(hdev); - if (err) - return err; - - /* Read Local Version Info */ - skb = btbcm_read_local_version(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - ver = (struct hci_rp_read_local_version *)skb->data; - rev = le16_to_cpu(ver->hci_rev); - subver = le16_to_cpu(ver->lmp_subver); - kfree_skb(skb); + int err; - /* Read controller information */ - err = btbcm_read_info(hdev); + /* Initialize */ + err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), false); if (err) return err; - switch ((rev & 0xf000) >> 12) { - case 0: - case 3: - for (i = 0; bcm_uart_subver_table[i].name; i++) { - if (subver == bcm_uart_subver_table[i].subver) { - hw_name = bcm_uart_subver_table[i].name; - break; - } - } - - snprintf(fw_name, sizeof(fw_name), "brcm/%s.hcd", - hw_name ? : "BCM"); - break; - case 1: - case 2: - /* Read USB Product Info */ - skb = btbcm_read_usb_product(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - vid = get_unaligned_le16(skb->data + 1); - pid = get_unaligned_le16(skb->data + 3); - kfree_skb(skb); - - for (i = 0; bcm_usb_subver_table[i].name; i++) { - if (subver == bcm_usb_subver_table[i].subver) { - hw_name = bcm_usb_subver_table[i].name; - break; - } - } - - snprintf(fw_name, sizeof(fw_name), "brcm/%s-%4.4x-%4.4x.hcd", - hw_name ? : "BCM", vid, pid); - break; - default: - return 0; - } - - bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u", - hw_name ? 
: "BCM", (subver & 0xe000) >> 13, - (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); - err = request_firmware(&fw, fw_name, &hdev->dev); if (err < 0) { bt_dev_info(hdev, "BCM: Patch %s not found", fw_name); @@ -517,25 +456,11 @@ int btbcm_setup_patchram(struct hci_dev *hdev) release_firmware(fw); - /* Reset */ - err = btbcm_reset(hdev); + /* Re-initialize */ + err = btbcm_initialize(hdev, fw_name, sizeof(fw_name), true); if (err) return err; - /* Read Local Version Info */ - skb = btbcm_read_local_version(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - ver = (struct hci_rp_read_local_version *)skb->data; - rev = le16_to_cpu(ver->hci_rev); - subver = le16_to_cpu(ver->lmp_subver); - kfree_skb(skb); - - bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u", - hw_name ? : "BCM", (subver & 0xe000) >> 13, - (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); - /* Read Local Name */ skb = btbcm_read_local_name(hdev); if (IS_ERR(skb)) diff --git a/drivers/bluetooth/btbcm.h b/drivers/bluetooth/btbcm.h index cfe6ad4cc621..5346515c880c 100644 --- a/drivers/bluetooth/btbcm.h +++ b/drivers/bluetooth/btbcm.h @@ -73,7 +73,8 @@ int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw); int btbcm_setup_patchram(struct hci_dev *hdev); int btbcm_setup_apple(struct hci_dev *hdev); -int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len); +int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len, + bool reinit); int btbcm_finalize(struct hci_dev *hdev); #else @@ -104,7 +105,7 @@ static inline int btbcm_setup_apple(struct hci_dev *hdev) } static inline int btbcm_initialize(struct hci_dev *hdev, char *fw_name, - size_t len) + size_t len, bool reinit) { return 0; } diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c index 1828ed8cae7a..c4867576be00 100644 --- a/drivers/bluetooth/btmrvl_debugfs.c +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -35,15 +35,9 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; - char buf[16]; long result, ret; - memset(buf, 0, sizeof(buf)); - - if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - - ret = kstrtol(buf, 10, &result); + ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; @@ -81,15 +75,9 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; - char buf[16]; long result, ret; - memset(buf, 0, sizeof(buf)); - - if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - - ret = kstrtol(buf, 10, &result); + ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; @@ -127,15 +115,9 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; - char buf[16]; long result, ret; - memset(buf, 0, sizeof(buf)); - - if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - - ret = kstrtol(buf, 10, &result); + ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; @@ -167,35 +149,6 @@ static const struct file_operations btmrvl_hscmd_fops = { .llseek = default_llseek, }; -static ssize_t btmrvl_fwdump_write(struct file *file, const char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct btmrvl_private *priv = file->private_data; - 
char buf[16]; - bool result; - - memset(buf, 0, sizeof(buf)); - - if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) - return -EFAULT; - - if (strtobool(buf, &result)) - return -EINVAL; - - if (!result) - return -EINVAL; - - btmrvl_firmware_dump(priv); - - return count; -} - -static const struct file_operations btmrvl_fwdump_fops = { - .write = btmrvl_fwdump_write, - .open = simple_open, - .llseek = default_llseek, -}; - void btmrvl_debugfs_init(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); @@ -226,8 +179,6 @@ void btmrvl_debugfs_init(struct hci_dev *hdev) priv, &btmrvl_hscmd_fops); debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, priv, &btmrvl_hscfgcmd_fops); - debugfs_create_file("fw_dump", 0200, dbg->config_dir, - priv, &btmrvl_fwdump_fops); dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); debugfs_create_u8("curpsmode", 0444, dbg->status_dir, diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index fc3caf4541ba..f0454541e5fd 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -110,7 +110,6 @@ struct btmrvl_private { u8 *payload, u16 nb); int (*hw_wakeup_firmware)(struct btmrvl_private *priv); int (*hw_process_int_status)(struct btmrvl_private *priv); - void (*firmware_dump)(struct btmrvl_private *priv); spinlock_t driver_lock; /* spinlock used by driver */ #ifdef CONFIG_DEBUG_FS void *debugfs_data; @@ -183,7 +182,6 @@ int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv); int btmrvl_enable_ps(struct btmrvl_private *priv); int btmrvl_prepare_command(struct btmrvl_private *priv); int btmrvl_enable_hs(struct btmrvl_private *priv); -void btmrvl_firmware_dump(struct btmrvl_private *priv); #ifdef CONFIG_DEBUG_FS void btmrvl_debugfs_init(struct hci_dev *hdev); diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index f6c694a1b9b0..708ad21683eb 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -358,12 +358,6 @@ int btmrvl_prepare_command(struct btmrvl_private *priv) return ret; } -void btmrvl_firmware_dump(struct btmrvl_private *priv) -{ - if (priv->firmware_dump) - priv->firmware_dump(priv); -} - static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) { int ret = 0; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 6f99b9f3d57f..888bac49a87b 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1311,9 +1311,11 @@ rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv, } /* This function dump sdio register and memory data */ -static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) +static void btmrvl_sdio_coredump(struct device *dev) { - struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + struct sdio_func *func = dev_to_sdio_func(dev); + struct btmrvl_sdio_card *card; + struct btmrvl_private *priv; int ret = 0; unsigned int reg, reg_start, reg_end; enum rdwr_status stat; @@ -1321,6 +1323,9 @@ static void btmrvl_sdio_dump_firmware(struct btmrvl_private *priv) u8 dump_num = 0, idx, i, read_reg, doneflag = 0; u32 memory_size, fw_dump_len = 0; + card = sdio_get_drvdata(func); + priv = card->priv; + /* dump sdio register first */ btmrvl_sdio_dump_regs(priv); @@ -1547,7 +1552,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func, priv->hw_host_to_card = btmrvl_sdio_host_to_card; priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; priv->hw_process_int_status = btmrvl_sdio_process_int_status; - 
priv->firmware_dump = btmrvl_sdio_dump_firmware; if (btmrvl_register_hdev(priv)) { BT_ERR("Register hdev failed!"); @@ -1717,6 +1721,7 @@ static struct sdio_driver bt_mrvl_sdio = { .remove = btmrvl_sdio_remove, .drv = { .owner = THIS_MODULE, + .coredump = btmrvl_sdio_coredump, .pm = &btmrvl_sdio_pm_ops, } }; diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 2793d4180d2f..8219816c54a0 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -127,28 +127,41 @@ static void rome_tlv_check_data(struct rome_config *config, BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); BT_DBG("Length\t\t : %d bytes", length); + config->dnld_mode = ROME_SKIP_EVT_NONE; + switch (config->type) { case TLV_TYPE_PATCH: tlv_patch = (struct tlv_type_patch *)tlv->data; - BT_DBG("Total Length\t\t : %d bytes", + + /* For Rome version 1.1 to 3.1, all segment commands + * are acked by a vendor specific event (VSE). + * For Rome >= 3.2, the download mode field indicates + * if VSE is skipped by the controller. + * In case VSE is skipped, only the last segment is acked. + */ + config->dnld_mode = tlv_patch->download_mode; + + BT_DBG("Total Length : %d bytes", le32_to_cpu(tlv_patch->total_size)); - BT_DBG("Patch Data Length\t : %d bytes", + BT_DBG("Patch Data Length : %d bytes", le32_to_cpu(tlv_patch->data_length)); BT_DBG("Signing Format Version : 0x%x", tlv_patch->format_version); - BT_DBG("Signature Algorithm\t : 0x%x", + BT_DBG("Signature Algorithm : 0x%x", tlv_patch->signature); - BT_DBG("Reserved\t\t : 0x%x", - le16_to_cpu(tlv_patch->reserved1)); - BT_DBG("Product ID\t\t : 0x%04x", + BT_DBG("Download mode : 0x%x", + tlv_patch->download_mode); + BT_DBG("Reserved : 0x%x", + tlv_patch->reserved1); + BT_DBG("Product ID : 0x%04x", le16_to_cpu(tlv_patch->product_id)); - BT_DBG("Rom Build Version\t : 0x%04x", + BT_DBG("Rom Build Version : 0x%04x", le16_to_cpu(tlv_patch->rom_build)); - BT_DBG("Patch Version\t\t : 0x%04x", + BT_DBG("Patch Version : 0x%04x", le16_to_cpu(tlv_patch->patch_version)); - BT_DBG("Reserved\t\t : 0x%x", + BT_DBG("Reserved : 0x%x", le16_to_cpu(tlv_patch->reserved2)); - BT_DBG("Patch Entry Address\t : 0x%x", + BT_DBG("Patch Entry Address : 0x%x", le32_to_cpu(tlv_patch->entry)); break; @@ -194,8 +207,8 @@ static void rome_tlv_check_data(struct rome_config *config, } } -static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size, - const u8 *data) +static int rome_tlv_send_segment(struct hci_dev *hdev, int seg_size, + const u8 *data, enum rome_tlv_dnld_mode mode) { struct sk_buff *skb; struct edl_event_hdr *edl; @@ -203,12 +216,14 @@ static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size, u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2]; int err = 0; - BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size); - cmd[0] = EDL_PATCH_TLV_REQ_CMD; cmd[1] = seg_size; memcpy(cmd + 2, data, seg_size); + if (mode == ROME_SKIP_EVT_VSE_CC || mode == ROME_SKIP_EVT_VSE) + return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, + cmd); + skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { @@ -245,47 +260,12 @@ out: return err; } -static int rome_tlv_download_request(struct hci_dev *hdev, - const struct firmware *fw) -{ - const u8 *buffer, *data; - int total_segment, remain_size; - int ret, i; - - if (!fw || !fw->data) - return -EINVAL; - - total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT; - remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT; - - BT_DBG("%s: 
Total segment num %d remain size %d total size %zu", - hdev->name, total_segment, remain_size, fw->size); - - data = fw->data; - for (i = 0; i < total_segment; i++) { - buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT; - ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT, - buffer); - if (ret < 0) - return -EIO; - } - - if (remain_size) { - buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT; - ret = rome_tlv_send_segment(hdev, total_segment, remain_size, - buffer); - if (ret < 0) - return -EIO; - } - - return 0; -} - static int rome_download_firmware(struct hci_dev *hdev, struct rome_config *config) { const struct firmware *fw; - int ret; + const u8 *segment; + int ret, remain, i = 0; bt_dev_info(hdev, "ROME Downloading %s", config->fwname); @@ -298,10 +278,24 @@ static int rome_download_firmware(struct hci_dev *hdev, rome_tlv_check_data(config, fw); - ret = rome_tlv_download_request(hdev, fw); - if (ret) { - BT_ERR("%s: Failed to download file: %s (%d)", hdev->name, - config->fwname, ret); + segment = fw->data; + remain = fw->size; + while (remain > 0) { + int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain); + + bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize); + + remain -= segsize; + /* The last segment is always acked regardless download mode */ + if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) + config->dnld_mode = ROME_SKIP_EVT_NONE; + + ret = rome_tlv_send_segment(hdev, segsize, segment, + config->dnld_mode); + if (ret) + break; + + segment += segsize; } release_firmware(fw); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 65e994b96c47..13d77fd873b6 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -61,6 +61,13 @@ enum qca_bardrate { QCA_BAUDRATE_RESERVED }; +enum rome_tlv_dnld_mode { + ROME_SKIP_EVT_NONE, + ROME_SKIP_EVT_VSE, + ROME_SKIP_EVT_CC, + ROME_SKIP_EVT_VSE_CC +}; + enum rome_tlv_type { TLV_TYPE_PATCH = 1, TLV_TYPE_NVM @@ -70,6 +77,7 @@ struct rome_config { u8 type; char fwname[64]; uint8_t user_baud_rate; + enum rome_tlv_dnld_mode dnld_mode; }; struct edl_event_hdr { @@ -94,7 +102,8 @@ struct tlv_type_patch { __le32 data_length; __u8 format_version; __u8 signature; - __le16 reserved1; + __u8 download_mode; + __u8 reserved1; __le16 product_id; __le16 rom_build; __le16 patch_version; diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 2c9a5fc9137d..7df3eed1ef5e 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -65,6 +65,7 @@ static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data, { struct btqcomsmd *btq = priv; + btq->hdev->stat.byte_rx += count; return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count); } @@ -76,12 +77,21 @@ static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: ret = rpmsg_send(btq->acl_channel, skb->data, skb->len); + if (ret) { + hdev->stat.err_tx++; + break; + } hdev->stat.acl_tx++; hdev->stat.byte_tx += skb->len; break; case HCI_COMMAND_PKT: ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len); + if (ret) { + hdev->stat.err_tx++; + break; + } hdev->stat.cmd_tx++; + hdev->stat.byte_tx += skb->len; break; default: ret = -EILSEQ; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b937cc1e2c07..f73a27ea28cc 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -276,6 +276,8 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME 
}, { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -371,6 +373,9 @@ static const struct usb_device_id blacklist_table[] = { /* Additional Realtek 8723BU Bluetooth devices */ { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8723DE Bluetooth devices */ + { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, + /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, @@ -379,6 +384,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8822BE Bluetooth devices */ + { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, /* Silicon Wave based devices */ @@ -406,6 +412,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, + { + /* Dell Inspiron 5565 (QCA ROME device 0cf3:e009) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5565"), + }, + }, {} }; @@ -2497,11 +2510,9 @@ static const struct qca_device_info qca_devices_table[] = { { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */ }; -static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request, +static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, void *data, u16 size) { - struct btusb_data *btdata = hci_get_drvdata(hdev); - struct usb_device *udev = btdata->udev; int pipe, err; u8 *buf; @@ -2516,7 +2527,7 @@ static int btusb_qca_send_vendor_req(struct hci_dev *hdev, u8 request, err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); if (err < 0) { - bt_dev_err(hdev, "Failed to access otp area (%d)", err); + dev_err(&udev->dev, "Failed to access otp area (%d)", err); goto done; } @@ -2666,20 +2677,38 @@ static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, return err; } +/* identify the ROM version and check whether patches are needed */ +static bool btusb_qca_need_patch(struct usb_device *udev) +{ + struct qca_version ver; + + if (btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, + sizeof(ver)) < 0) + return false; + /* only low ROM versions need patches */ + return !(le32_to_cpu(ver.rom_version) & ~0xffffU); +} + static int btusb_setup_qca(struct hci_dev *hdev) { + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; const struct qca_device_info *info = NULL; struct qca_version ver; u32 ver_rom; u8 status; int i, err; - err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver, + err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, sizeof(ver)); if (err < 0) return err; ver_rom = le32_to_cpu(ver.rom_version); + /* Don't care about high ROM versions */ + if (ver_rom & ~0xffffU) + return 0; + for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) { if (ver_rom == qca_devices_table[i].rom_version) info = &qca_devices_table[i]; @@ -2689,7 +2718,7 @@ static int btusb_setup_qca(struct hci_dev *hdev) return -ENODEV; } - err = btusb_qca_send_vendor_req(hdev, 
QCA_CHECK_STATUS, &status, + err = btusb_qca_send_vendor_req(udev, QCA_CHECK_STATUS, &status, sizeof(status)); if (err < 0) return err; @@ -2903,7 +2932,8 @@ static int btusb_probe(struct usb_interface *intf, /* Old firmware would otherwise let ath3k driver load * patch and sysconfig files */ - if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) + if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001 && + !btusb_qca_need_patch(udev)) return -ENODEV; } @@ -3065,6 +3095,7 @@ static int btusb_probe(struct usb_interface *intf, } if (id->driver_info & BTUSB_ATH3012) { + data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 441f5e1deb11..ddbd8c6a0ceb 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -380,10 +380,6 @@ static int bcm_open(struct hci_uart *hu) mutex_lock(&bcm_device_lock); if (hu->serdev) { - err = serdev_device_open(hu->serdev); - if (err) - goto err_free; - bcm->dev = serdev_device_get_drvdata(hu->serdev); goto out; } @@ -420,13 +416,10 @@ out: return 0; err_unset_hu: - if (hu->serdev) - serdev_device_close(hu->serdev); #ifdef CONFIG_PM - else + if (!hu->serdev) bcm->dev->hu = NULL; #endif -err_free: mutex_unlock(&bcm_device_lock); hu->priv = NULL; kfree(bcm); @@ -445,7 +438,6 @@ static int bcm_close(struct hci_uart *hu) mutex_lock(&bcm_device_lock); if (hu->serdev) { - serdev_device_close(hu->serdev); bdev = serdev_device_get_drvdata(hu->serdev); } else if (bcm_device_exists(bcm->dev)) { bdev = bcm->dev; @@ -501,7 +493,7 @@ static int bcm_setup(struct hci_uart *hu) hu->hdev->set_diag = bcm_set_diag; hu->hdev->set_bdaddr = btbcm_set_bdaddr; - err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name)); + err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name), false); if (err) return err; @@ -794,19 +786,21 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { { }, }; -#ifdef CONFIG_ACPI -/* IRQ polarity of some chipsets are not defined correctly in ACPI table. */ -static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = { - { /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */ - .ident = "Lenovo ThinkPad 8", +/* Some firmware reports an IRQ which does not work (wrong pin in fw table?) 
*/ +static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { + { + .ident = "Meegopad T08", .matches = { - DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"), + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, + "To be filled by OEM."), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"), + DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"), }, }, { } }; +#ifdef CONFIG_ACPI static int bcm_resource(struct acpi_resource *ares, void *data) { struct bcm_device *dev = data; @@ -904,6 +898,8 @@ static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered) static int bcm_get_resources(struct bcm_device *dev) { + const struct dmi_system_id *dmi_id; + dev->name = dev_name(dev->dev); if (x86_apple_machine && !bcm_apple_get_resources(dev)) @@ -936,6 +932,13 @@ static int bcm_get_resources(struct bcm_device *dev) dev->irq = gpiod_to_irq(gpio); } + dmi_id = dmi_first_match(bcm_broken_irq_dmi_table); + if (dmi_id) { + dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n", + dmi_id->ident); + dev->irq = 0; + } + dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq); return 0; } @@ -944,7 +947,6 @@ static int bcm_get_resources(struct bcm_device *dev) static int bcm_acpi_probe(struct bcm_device *dev) { LIST_HEAD(resources); - const struct dmi_system_id *dmi_id; const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; struct resource_entry *entry; int ret; @@ -991,13 +993,6 @@ static int bcm_acpi_probe(struct bcm_device *dev) dev->irq_active_low = irq_polarity; dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n", dev->irq_active_low ? "low" : "high"); - } else { - dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table); - if (dmi_id) { - dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low", - dmi_id->ident); - dev->irq_active_low = true; - } } return 0; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index b6a71705b7d6..963bb0309e25 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -195,7 +195,7 @@ restart: clear_bit(HCI_UART_SENDING, &hu->tx_state); } -static void hci_uart_init_work(struct work_struct *work) +void hci_uart_init_work(struct work_struct *work) { struct hci_uart *hu = container_of(work, struct hci_uart, init_ready); int err; @@ -229,15 +229,6 @@ int hci_uart_init_ready(struct hci_uart *hu) } /* ------- Interface to HCI layer ------ */ -/* Initialize device */ -static int hci_uart_open(struct hci_dev *hdev) -{ - BT_DBG("%s %p", hdev->name, hdev); - - /* Nothing to do for UART driver */ - return 0; -} - /* Reset device */ static int hci_uart_flush(struct hci_dev *hdev) { @@ -264,6 +255,17 @@ static int hci_uart_flush(struct hci_dev *hdev) return 0; } +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + /* Undo clearing this from hci_uart_close() */ + hdev->flush = hci_uart_flush; + + return 0; +} + /* Close device */ static int hci_uart_close(struct hci_dev *hdev) { @@ -447,6 +449,8 @@ static int hci_uart_setup(struct hci_dev *hdev) btbcm_check_bdaddr(hdev); break; #endif + default: + break; } done: diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 27e414b4e3a2..3e767f245ed5 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -141,7 +141,6 @@ static int ll_open(struct hci_uart *hu) if (hu->serdev) { struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); - serdev_device_open(hu->serdev); if 
(!IS_ERR(lldev->ext_clk)) clk_prepare_enable(lldev->ext_clk); } @@ -179,8 +178,6 @@ static int ll_close(struct hci_uart *hu) gpiod_set_value_cansleep(lldev->enable_gpio, 0); clk_disable_unprepare(lldev->ext_clk); - - serdev_device_close(hu->serdev); } hu->priv = NULL; diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 3539fd03f47e..14d159e2042d 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -477,8 +477,6 @@ static int nokia_open(struct hci_uart *hu) dev_dbg(dev, "protocol open"); - serdev_device_open(hu->serdev); - pm_runtime_enable(dev); return 0; @@ -513,7 +511,6 @@ static int nokia_close(struct hci_uart *hu) gpiod_set_value(btdev->wakeup_bt, 0); pm_runtime_disable(&btdev->serdev->dev); - serdev_device_close(btdev->serdev); return 0; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 05ec530b8a3a..51790dd02afb 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -29,7 +29,12 @@ */ #include <linux/kernel.h> +#include <linux/clk.h> #include <linux/debugfs.h> +#include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/serdev.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -50,6 +55,9 @@ #define IBS_TX_IDLE_TIMEOUT_MS 2000 #define BAUDRATE_SETTLE_TIMEOUT_MS 300 +/* susclk rate */ +#define SUSCLK_RATE_32KHZ 32768 + /* HCI_IBS transmit side sleep protocol states */ enum tx_ibs_states { HCI_IBS_TX_ASLEEP, @@ -111,6 +119,12 @@ struct qca_data { u64 votes_off; }; +struct qca_serdev { + struct hci_uart serdev_hu; + struct gpio_desc *bt_en; + struct clk *susclk; +}; + static void __serial_clock_on(struct tty_struct *tty) { /* TODO: Some chipset requires to enable UART clock on client @@ -386,6 +400,7 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t) /* Initialize protocol */ static int qca_open(struct hci_uart *hu) { + struct qca_serdev *qcadev; struct qca_data *qca; BT_DBG("hu %p qca_open", hu); @@ -444,6 +459,13 @@ static int qca_open(struct hci_uart *hu) timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS; + if (hu->serdev) { + serdev_device_open(hu->serdev); + + qcadev = serdev_device_get_drvdata(hu->serdev); + gpiod_set_value_cansleep(qcadev->bt_en, 1); + } + BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", qca->tx_idle_delay, qca->wake_retrans); @@ -512,6 +534,7 @@ static int qca_flush(struct hci_uart *hu) /* Close protocol */ static int qca_close(struct hci_uart *hu) { + struct qca_serdev *qcadev; struct qca_data *qca = hu->priv; BT_DBG("hu %p qca close", hu); @@ -525,6 +548,13 @@ static int qca_close(struct hci_uart *hu) destroy_workqueue(qca->workqueue); qca->hu = NULL; + if (hu->serdev) { + serdev_device_close(hu->serdev); + + qcadev = serdev_device_get_drvdata(hu->serdev); + gpiod_set_value_cansleep(qcadev->bt_en, 0); + } + kfree_skb(qca->rx_skb); hu->priv = NULL; @@ -880,11 +910,19 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); - set_current_state(TASK_INTERRUPTIBLE); + set_current_state(TASK_RUNNING); return 0; } +static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, speed); + else + hci_uart_set_baudrate(hu, speed); +} + static int qca_setup(struct hci_uart *hu) { struct hci_dev *hdev = 
hu->hdev; @@ -905,7 +943,7 @@ static int qca_setup(struct hci_uart *hu) speed = hu->proto->init_speed; if (speed) - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); /* Setup user speed if needed */ speed = 0; @@ -924,7 +962,7 @@ static int qca_setup(struct hci_uart *hu) ret); return ret; } - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); } /* Setup patch / NVM configurations */ @@ -935,6 +973,12 @@ static int qca_setup(struct hci_uart *hu) } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ ret = 0; + } else if (ret == -EAGAIN) { + /* + * Userspace firmware loader will return -EAGAIN in case no + * patch/nvm-config is found, so run with original fw/config. + */ + ret = 0; } /* Setup bdaddr */ @@ -958,12 +1002,80 @@ static struct hci_uart_proto qca_proto = { .dequeue = qca_dequeue, }; +static int qca_serdev_probe(struct serdev_device *serdev) +{ + struct qca_serdev *qcadev; + int err; + + qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); + if (!qcadev) + return -ENOMEM; + + qcadev->serdev_hu.serdev = serdev; + serdev_device_set_drvdata(serdev, qcadev); + + qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable", + GPIOD_OUT_LOW); + if (IS_ERR(qcadev->bt_en)) { + dev_err(&serdev->dev, "failed to acquire enable gpio\n"); + return PTR_ERR(qcadev->bt_en); + } + + qcadev->susclk = devm_clk_get(&serdev->dev, NULL); + if (IS_ERR(qcadev->susclk)) { + dev_err(&serdev->dev, "failed to acquire clk\n"); + return PTR_ERR(qcadev->susclk); + } + + err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); + if (err) + return err; + + err = clk_prepare_enable(qcadev->susclk); + if (err) + return err; + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) + clk_disable_unprepare(qcadev->susclk); + + return err; +} + +static void qca_serdev_remove(struct serdev_device *serdev) +{ + struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&qcadev->serdev_hu); + + clk_disable_unprepare(qcadev->susclk); +} + +static const struct of_device_id qca_bluetooth_of_match[] = { + { .compatible = "qcom,qca6174-bt" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); + +static struct serdev_device_driver qca_serdev_driver = { + .probe = qca_serdev_probe, + .remove = qca_serdev_remove, + .driver = { + .name = "hci_uart_qca", + .of_match_table = qca_bluetooth_of_match, + }, +}; + int __init qca_init(void) { + serdev_device_driver_register(&qca_serdev_driver); + return hci_uart_register_proto(&qca_proto); } int __exit qca_deinit(void) { + serdev_device_driver_unregister(&qca_serdev_driver); + return hci_uart_unregister_proto(&qca_proto); } diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index e0e6461b9200..aa2543b3c286 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -101,14 +101,6 @@ static void hci_uart_write_work(struct work_struct *work) /* ------- Interface to HCI layer ------ */ -/* Initialize device */ -static int hci_uart_open(struct hci_dev *hdev) -{ - BT_DBG("%s %p", hdev->name, hdev); - - return 0; -} - /* Reset device */ static int hci_uart_flush(struct hci_dev *hdev) { @@ -129,6 +121,17 @@ static int hci_uart_flush(struct hci_dev *hdev) return 0; } +/* Initialize device */ +static int hci_uart_open(struct hci_dev *hdev) +{ + BT_DBG("%s %p", hdev->name, hdev); + + /* Undo clearing this from hci_uart_close() */ + hdev->flush = hci_uart_flush; + + return 0; +} + /* Close device */ 
static int hci_uart_close(struct hci_dev *hdev) { @@ -204,9 +207,8 @@ static int hci_uart_setup(struct hci_dev *hdev) return 0; } - if (skb->len != sizeof(*ver)) { + if (skb->len != sizeof(*ver)) bt_dev_err(hdev, "Event length mismatch for version info"); - } kfree_skb(skb); return 0; @@ -282,10 +284,14 @@ int hci_uart_register_device(struct hci_uart *hu, serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); - err = p->open(hu); + err = serdev_device_open(hu->serdev); if (err) return err; + err = p->open(hu); + if (err) + goto err_open; + hu->proto = p; set_bit(HCI_UART_PROTO_READY, &hu->flags); @@ -302,6 +308,7 @@ int hci_uart_register_device(struct hci_uart *hu, hdev->bus = HCI_UART; hci_set_drvdata(hdev, hu); + INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); percpu_init_rwsem(&hu->proto_lock); @@ -351,6 +358,8 @@ err_register: err_alloc: clear_bit(HCI_UART_PROTO_READY, &hu->flags); p->close(hu); +err_open: + serdev_device_close(hu->serdev); return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); @@ -365,5 +374,6 @@ void hci_uart_unregister_device(struct hci_uart *hu) cancel_work_sync(&hu->write_work); hu->proto->close(hu); + serdev_device_close(hu->serdev); } EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 66e8c68e4607..00cab2fd7a1b 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -116,6 +116,7 @@ void hci_uart_unregister_device(struct hci_uart *hu); int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); +void hci_uart_init_work(struct work_struct *work); void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed); void hci_uart_set_flow_control(struct hci_uart *hu, bool enable); void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index a782ce87715c..ed5e42461094 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -262,6 +262,8 @@ void proc_coredump_connector(struct task_struct *task) ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; + ev->event_data.coredump.parent_pid = task->real_parent->pid; + ev->event_data.coredump.parent_tgid = task->real_parent->tgid; memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -288,6 +290,8 @@ void proc_exit_connector(struct task_struct *task) ev->event_data.exit.process_tgid = task->tgid; ev->event_data.exit.exit_code = task->exit_code; ev->event_data.exit.exit_signal = task->exit_signal; + ev->event_data.exit.parent_pid = task->real_parent->pid; + ev->event_data.exit.parent_tgid = task->real_parent->tgid; memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 7afbb28d6a0f..1bc5ffb338c8 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -270,7 +270,7 @@ EXPORT_SYMBOL_GPL(dca_remove_requester); * @dev - the device that wants dca service * @cpu - the cpuid as returned by get_cpu() */ -u8 dca_common_get_tag(struct device *dev, int cpu) +static u8 dca_common_get_tag(struct device *dev, int cpu) { struct dca_provider *dca; u8 tag; diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 77d257ec899b..6d52ea03574e 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ 
b/drivers/infiniband/hw/mlx5/cq.c @@ -849,7 +849,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata, return 0; err_cqb: - kfree(*cqb); + kvfree(*cqb); err_db: mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db); diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index b816c80df50b..7e1f7021396a 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum { RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR, RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR, RDMA_CQE_REQ_STS_XRC_VOILATION_ERR, + RDMA_CQE_REQ_STS_SIG_ERR, MAX_RDMA_CQE_REQUESTER_STATUS_ENUM }; @@ -152,12 +153,12 @@ struct rdma_rq_sge { struct regpair addr; __le32 length; __le32 flags; -#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF -#define RDMA_RQ_SGE_L_KEY_SHIFT 0 +#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF +#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0 #define RDMA_RQ_SGE_NUM_SGES_MASK 0x7 #define RDMA_RQ_SGE_NUM_SGES_SHIFT 26 -#define RDMA_RQ_SGE_RESERVED0_MASK 0x7 -#define RDMA_RQ_SGE_RESERVED0_SHIFT 29 +#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7 +#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29 }; struct rdma_srq_sge { @@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg { MAX_RDMA_DIF_IO_DIRECTION_FLG }; -/* RDMA DIF Runt Result Structure */ -struct rdma_dif_runt_result { - __le16 guard_tag; - __le16 reserved[3]; +struct rdma_dif_params { + __le32 base_ref_tag; + __le16 app_tag; + __le16 app_tag_mask; + __le16 runt_crc_value; + __le16 flags; +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1 +#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1 +#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1 +#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8 +#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9 +#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1 +#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10 +#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F +#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11 + __le32 reserved5; }; -/* Memory window type enumeration */ -enum rdma_mw_type { - RDMA_MW_TYPE_1, - RDMA_MW_TYPE_2A, - MAX_RDMA_MW_TYPE -}; struct rdma_sq_atomic_wqe { __le32 reserved1; @@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe { #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3 #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1 #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4 -#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7 -#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3 +#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6 u8 wqe_size; u8 prev_wqe_size; u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1 -#define 
RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0 @@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe { __le32 length_lo; __le32 parent_l_key; __le32 reserved4; + struct rdma_dif_params dif_params; }; /* First element (16 bytes) of bind wqe */ @@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd { u8 bind_ctrl; #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1 -#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1 -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F -#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2 +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F +#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1 u8 access_ctrl; #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0 @@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd { __le32 reserved4; }; +/* Third element (16 bytes) of bind wqe */ +struct rdma_sq_bind_wqe_3rd { + struct rdma_dif_params dif_params; +}; + /* Structure with only the SQ WQE common * fields. Size is of one SQ element (16B) */ @@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe { u8 length_hi; __le32 length_lo; struct regpair pbl_addr; - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; }; /* First element (16 bytes) of fmr wqe */ @@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd { struct regpair pbl_addr; }; -/* Third element (16 bytes) of fmr wqe */ -struct rdma_sq_fmr_wqe_3rd { - __le32 dif_base_ref_tag; - __le16 dif_app_tag; - __le16 dif_app_tag_mask; - __le16 dif_runt_crc_value; - __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define 
RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7 -#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 - __le32 reserved5; -}; struct rdma_sq_local_inv_wqe { struct regpair reserved; @@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe { #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6 -#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7 +#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7 u8 wqe_size; u8 prev_wqe_size; struct regpair remote_va; @@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe { u8 dif_flags; #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2 -#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F -#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3 - u8 reserved2[3]; +#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F +#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1 + u8 reserved3[3]; }; /* First element (16 bytes) of rdma wqe */ diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 3f9afc02d166..e2caabb8a926 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, wr->num_sge); - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, wr->sg_list[i].lkey); RQ_SGE_SET(rqe, wr->sg_list[i].addr, @@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, /* First one must include the number * of SGE in the list */ - SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0); + SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0); SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1); RQ_SGE_SET(rqe, 0, 0, flags); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c index 4be3aef40bd2..267da8215e08 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c @@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info, } /* opa_vnic_calc_entropy - calculate the packet entropy */ -u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb) +u8 opa_vnic_calc_entropy(struct sk_buff *skb) { - u16 hash16; - - /* - * Get flow based 16-bit hash and then XOR the upper and lower bytes - * to get the entropy. - * __skb_tx_hash limits qcount to 16 bits. Hence, get 15-bit hash. 
- */ - hash16 = __skb_tx_hash(adapter->netdev, skb, BIT(15)); - return (u8)((hash16 >> 8) ^ (hash16 & 0xff)); + u32 hash = skb_get_hash(skb); + + /* store XOR of all bytes in lower 8 bits */ + hash ^= hash >> 8; + hash ^= hash >> 16; + + /* return lower 8 bits as entropy */ + return (u8)(hash & 0xFF); } /* opa_vnic_get_def_port - get default port based on entropy */ @@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb) hdr = skb_push(skb, OPA_VNIC_HDR_LEN); - entropy = opa_vnic_calc_entropy(adapter, skb); + entropy = opa_vnic_calc_entropy(skb); def_port = opa_vnic_get_def_port(adapter, entropy); len = opa_vnic_wire_length(skb); dlid = opa_vnic_get_dlid(adapter, skb, def_port); diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h index afd95f432262..43ac61ffef4a 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_internal.h @@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter); void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb); u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb); -u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb); +u8 opa_vnic_calc_entropy(struct sk_buff *skb); void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter); void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter); void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter, diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c index ce57e0f10289..0c8aec62a425 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c @@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, /* pass entropy and vl as metadata in skb */ mdata = skb_push(skb, sizeof(*mdata)); - mdata->entropy = opa_vnic_calc_entropy(adapter, skb); + mdata->entropy = opa_vnic_calc_entropy(skb); mdata->vl = opa_vnic_get_vl(adapter, skb); rc = adapter->rn_ops->ndo_select_queue(netdev, skb, accel_priv, fallback); diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index eb2c3b6eca7f..d5b35a6ba899 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -25,6 +25,19 @@ config LIRC passes raw IR to and from userspace, which is needed for IR transmitting (aka "blasting") and for the lirc daemon. +config BPF_LIRC_MODE2 + bool "Support for eBPF programs attached to lirc devices" + depends on BPF_SYSCALL + depends on RC_CORE=y + depends on LIRC + help + Allow attaching eBPF programs to a lirc device using the bpf(2) + syscall command BPF_PROG_ATTACH. This is supported for raw IR + receivers. + + These eBPF programs can be used to decode IR into scancodes, for + IR protocols not supported by the kernel decoders. 
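A minimal sketch of such a decoder, for illustration only; the hand-declared helper stub, the 1000 microsecond pulse threshold, and the protocol/scancode values below are assumptions, not part of this patch:

// SPDX-License-Identifier: GPL-2.0
/* Sketch of a BPF_PROG_TYPE_LIRC_MODE2 program. The context is a
 * single u32 LIRC_MODE2 sample, matching lirc_mode2_is_valid_access()
 * in bpf-lirc.c below. Declaring helpers by hand from their
 * BPF_FUNC_* IDs was the usual idiom before libbpf's bpf_helpers.h.
 */
#include <linux/bpf.h>
#include <linux/lirc.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
			     unsigned long long scancode,
			     unsigned int toggle) =
	(void *) BPF_FUNC_rc_keydown;

SEC("lirc_mode2")
int ir_decoder(unsigned int *sample)
{
	/* Report an arbitrary scancode for any pulse over 1000 us; a
	 * real decoder would accumulate state across samples in a BPF
	 * map. Protocol 64 is a made-up out-of-tree value.
	 */
	if (LIRC_IS_PULSE(*sample) && LIRC_VALUE(*sample) > 1000)
		bpf_rc_keydown(sample, 64, 0x0123, 0);

	return 0;
}

/* rc_keydown/rc_repeat are gpl_only helpers, so the program must be GPL */
char _license[] SEC("license") = "GPL";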
+ menuconfig RC_DECODERS bool "Remote controller decoders" depends on RC_CORE diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 2e1c87066f6c..e0340d043fe8 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -5,6 +5,7 @@ obj-y += keymaps/ obj-$(CONFIG_RC_CORE) += rc-core.o rc-core-y := rc-main.o rc-ir-raw.o rc-core-$(CONFIG_LIRC) += lirc_dev.o +rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o obj-$(CONFIG_IR_RC6_DECODER) += ir-rc6-decoder.o diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c new file mode 100644 index 000000000000..40826bba06b6 --- /dev/null +++ b/drivers/media/rc/bpf-lirc.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +// bpf-lirc.c - handles bpf +// +// Copyright (C) 2018 Sean Young <sean@mess.org> + +#include <linux/bpf.h> +#include <linux/filter.h> +#include <linux/bpf_lirc.h> +#include "rc-core-priv.h" + +/* + * BPF interface for raw IR + */ +const struct bpf_prog_ops lirc_mode2_prog_ops = { +}; + +BPF_CALL_1(bpf_rc_repeat, u32*, sample) +{ + struct ir_raw_event_ctrl *ctrl; + + ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample); + + rc_repeat(ctrl->dev); + + return 0; +} + +static const struct bpf_func_proto rc_repeat_proto = { + .func = bpf_rc_repeat, + .gpl_only = true, /* rc_repeat is EXPORT_SYMBOL_GPL */ + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +}; + +/* + * Currently rc-core does not support 64-bit scancodes, but there are many + * known protocols with more than 32 bits. So, define the interface as u64 + * as a future-proof. + */ +BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode, + u32, toggle) +{ + struct ir_raw_event_ctrl *ctrl; + + ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample); + + rc_keydown(ctrl->dev, protocol, scancode, toggle != 0); + + return 0; +} + +static const struct bpf_func_proto rc_keydown_proto = { + .func = bpf_rc_keydown, + .gpl_only = true, /* rc_keydown is EXPORT_SYMBOL_GPL */ + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_rc_repeat: + return &rc_repeat_proto; + case BPF_FUNC_rc_keydown: + return &rc_keydown_proto; + case BPF_FUNC_map_lookup_elem: + return &bpf_map_lookup_elem_proto; + case BPF_FUNC_map_update_elem: + return &bpf_map_update_elem_proto; + case BPF_FUNC_map_delete_elem: + return &bpf_map_delete_elem_proto; + case BPF_FUNC_ktime_get_ns: + return &bpf_ktime_get_ns_proto; + case BPF_FUNC_tail_call: + return &bpf_tail_call_proto; + case BPF_FUNC_get_prandom_u32: + return &bpf_get_prandom_u32_proto; + case BPF_FUNC_trace_printk: + if (capable(CAP_SYS_ADMIN)) + return bpf_get_trace_printk_proto(); + /* fall through */ + default: + return NULL; + } +} + +static bool lirc_mode2_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + /* We have one field of u32 */ + return type == BPF_READ && off == 0 && size == sizeof(u32); +} + +const struct bpf_verifier_ops lirc_mode2_verifier_ops = { + .get_func_proto = lirc_mode2_func_proto, + .is_valid_access = lirc_mode2_is_valid_access +}; + +#define BPF_MAX_PROGS 64 + +static int lirc_bpf_attach(struct rc_dev *rcdev, struct 
bpf_prog *prog) +{ + struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *new_array; + struct ir_raw_event_ctrl *raw; + int ret; + + if (rcdev->driver_type != RC_DRIVER_IR_RAW) + return -EINVAL; + + ret = mutex_lock_interruptible(&ir_raw_handler_lock); + if (ret) + return ret; + + raw = rcdev->raw; + if (!raw) { + ret = -ENODEV; + goto unlock; + } + + if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) { + ret = -E2BIG; + goto unlock; + } + + old_array = raw->progs; + ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array); + if (ret < 0) + goto unlock; + + rcu_assign_pointer(raw->progs, new_array); + bpf_prog_array_free(old_array); + +unlock: + mutex_unlock(&ir_raw_handler_lock); + return ret; +} + +static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog) +{ + struct bpf_prog_array __rcu *old_array; + struct bpf_prog_array *new_array; + struct ir_raw_event_ctrl *raw; + int ret; + + if (rcdev->driver_type != RC_DRIVER_IR_RAW) + return -EINVAL; + + ret = mutex_lock_interruptible(&ir_raw_handler_lock); + if (ret) + return ret; + + raw = rcdev->raw; + if (!raw) { + ret = -ENODEV; + goto unlock; + } + + old_array = raw->progs; + ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array); + /* + * Do not use bpf_prog_array_delete_safe() as we would end up + * with a dummy entry in the array, and then we would free the + * dummy in lirc_bpf_free() + */ + if (ret) + goto unlock; + + rcu_assign_pointer(raw->progs, new_array); + bpf_prog_array_free(old_array); +unlock: + mutex_unlock(&ir_raw_handler_lock); + return ret; +} + +void lirc_bpf_run(struct rc_dev *rcdev, u32 sample) +{ + struct ir_raw_event_ctrl *raw = rcdev->raw; + + raw->bpf_sample = sample; + + if (raw->progs) + BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN); +} + +/* + * This should be called once the rc thread has been stopped, so there can be + * no concurrent bpf execution. 
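+ * Since nothing can be executing the programs any more, each program's + * reference is simply dropped and the array freed in place, with no + * RCU grace period required.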
+ */ +void lirc_bpf_free(struct rc_dev *rcdev) +{ + struct bpf_prog **progs; + + if (!rcdev->raw->progs) + return; + + progs = rcu_dereference(rcdev->raw->progs)->progs; + while (*progs) + bpf_prog_put(*progs++); + + bpf_prog_array_free(rcdev->raw->progs); +} + +int lirc_prog_attach(const union bpf_attr *attr) +{ + struct bpf_prog *prog; + struct rc_dev *rcdev; + int ret; + + if (attr->attach_flags) + return -EINVAL; + + prog = bpf_prog_get_type(attr->attach_bpf_fd, + BPF_PROG_TYPE_LIRC_MODE2); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + rcdev = rc_dev_get_from_fd(attr->target_fd); + if (IS_ERR(rcdev)) { + bpf_prog_put(prog); + return PTR_ERR(rcdev); + } + + ret = lirc_bpf_attach(rcdev, prog); + if (ret) + bpf_prog_put(prog); + + put_device(&rcdev->dev); + + return ret; +} + +int lirc_prog_detach(const union bpf_attr *attr) +{ + struct bpf_prog *prog; + struct rc_dev *rcdev; + int ret; + + if (attr->attach_flags) + return -EINVAL; + + prog = bpf_prog_get_type(attr->attach_bpf_fd, + BPF_PROG_TYPE_LIRC_MODE2); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + rcdev = rc_dev_get_from_fd(attr->target_fd); + if (IS_ERR(rcdev)) { + bpf_prog_put(prog); + return PTR_ERR(rcdev); + } + + ret = lirc_bpf_detach(rcdev, prog); + + bpf_prog_put(prog); + put_device(&rcdev->dev); + + return ret; +} + +int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) +{ + __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); + struct bpf_prog_array __rcu *progs; + struct rc_dev *rcdev; + u32 cnt, flags = 0; + int ret; + + if (attr->query.query_flags) + return -EINVAL; + + rcdev = rc_dev_get_from_fd(attr->query.target_fd); + if (IS_ERR(rcdev)) + return PTR_ERR(rcdev); + + if (rcdev->driver_type != RC_DRIVER_IR_RAW) { + ret = -EINVAL; + goto put; + } + + ret = mutex_lock_interruptible(&ir_raw_handler_lock); + if (ret) + goto put; + + progs = rcdev->raw->progs; + cnt = progs ? 
bpf_prog_array_length(progs) : 0; + + if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) { + ret = -EFAULT; + goto unlock; + } + + if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) { + ret = -EFAULT; + goto unlock; + } + + if (attr->query.prog_cnt != 0 && prog_ids && cnt) + ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt); + +unlock: + mutex_unlock(&ir_raw_handler_lock); +put: + put_device(&rcdev->dev); + + return ret; +} diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c index 24e9fbb80e81..da7013a12a58 100644 --- a/drivers/media/rc/lirc_dev.c +++ b/drivers/media/rc/lirc_dev.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/device.h> +#include <linux/file.h> #include <linux/idr.h> #include <linux/poll.h> #include <linux/sched.h> @@ -104,6 +105,12 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev) TO_US(ev.duration), TO_STR(ev.pulse)); } + /* + * bpf does not care about the gap generated above; that exists + * for backwards compatibility + */ + lirc_bpf_run(dev, sample); + spin_lock_irqsave(&dev->lirc_fh_lock, flags); list_for_each_entry(fh, &dev->lirc_fh, list) { if (LIRC_IS_TIMEOUT(sample) && !fh->send_timeout_reports) @@ -816,4 +823,27 @@ void __exit lirc_dev_exit(void) unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX); } +struct rc_dev *rc_dev_get_from_fd(int fd) +{ + struct fd f = fdget(fd); + struct lirc_fh *fh; + struct rc_dev *dev; + + if (!f.file) + return ERR_PTR(-EBADF); + + if (f.file->f_op != &lirc_fops) { + fdput(f); + return ERR_PTR(-EINVAL); + } + + fh = f.file->private_data; + dev = fh->rc; + + get_device(&dev->dev); + fdput(f); + + return dev; +} + MODULE_ALIAS("lirc_dev"); diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index e0e6a17460f6..eb004757038b 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -13,6 +13,7 @@ #define MAX_IR_EVENT_SIZE 512 #include <linux/slab.h> +#include <uapi/linux/bpf.h> #include <media/rc-core.h> /** @@ -57,6 +58,11 @@ struct ir_raw_event_ctrl { /* raw decoder state follows */ struct ir_raw_event prev_ev; struct ir_raw_event this_ev; + +#ifdef CONFIG_BPF_LIRC_MODE2 + u32 bpf_sample; + struct bpf_prog_array __rcu *progs; +#endif struct nec_dec { int state; unsigned count; @@ -126,6 +132,9 @@ struct ir_raw_event_ctrl { } imon; }; +/* Mutex for locking raw IR processing and handler change */ +extern struct mutex ir_raw_handler_lock; + /* macros for IR decoders */ static inline bool geq_margin(unsigned d1, unsigned d2, unsigned margin) { @@ -288,6 +297,7 @@ void ir_lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev); void ir_lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc); int ir_lirc_register(struct rc_dev *dev); void ir_lirc_unregister(struct rc_dev *dev); +struct rc_dev *rc_dev_get_from_fd(int fd); #else static inline int lirc_dev_init(void) { return 0; } static inline void lirc_dev_exit(void) {} @@ -299,4 +309,15 @@ static inline int ir_lirc_register(struct rc_dev *dev) { return 0; } static inline void ir_lirc_unregister(struct rc_dev *dev) { } #endif +/* + * bpf interface + */ +#ifdef CONFIG_BPF_LIRC_MODE2 +void lirc_bpf_free(struct rc_dev *dev); +void lirc_bpf_run(struct rc_dev *dev, u32 sample); +#else +static inline void lirc_bpf_free(struct rc_dev *dev) { } +static inline void lirc_bpf_run(struct rc_dev *dev, u32 sample) { } +#endif + #endif /* _RC_CORE_PRIV */ diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c 
index 374f83105a23..7675b7ee5bc7 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -14,7 +14,7 @@ static LIST_HEAD(ir_raw_client_list); /* Used to handle IR raw handler extensions */ -static DEFINE_MUTEX(ir_raw_handler_lock); +DEFINE_MUTEX(ir_raw_handler_lock); static LIST_HEAD(ir_raw_handler_list); static atomic64_t available_protocols = ATOMIC64_INIT(0); @@ -621,9 +621,17 @@ void ir_raw_event_unregister(struct rc_dev *dev) list_for_each_entry(handler, &ir_raw_handler_list, list) if (handler->raw_unregister) handler->raw_unregister(dev); - mutex_unlock(&ir_raw_handler_lock); + + lirc_bpf_free(dev); ir_raw_event_free(dev); + + /* + * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so + * ensure that the raw member is null on unlock; this is how + * "device gone" is checked. + */ + mutex_unlock(&ir_raw_handler_lock); } /* diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 891846655000..d03775100f7d 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -198,6 +198,7 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" depends on INET && NET_UDP_TUNNEL + depends on IPV6 || !IPV6 select NET_IP_TUNNEL select GRO_CELLS ---help--- @@ -331,6 +332,7 @@ config VETH config VIRTIO_NET tristate "Virtio network driver" depends on VIRTIO + select NET_FAILOVER ---help--- This is the virtual network driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). Say Y or M. @@ -509,4 +511,16 @@ config NETDEVSIM To compile this driver as a module, choose M here: the module will be called netdevsim. +config NET_FAILOVER + tristate "Failover driver" + select FAILOVER + help + This provides an automated failover mechanism via APIs to create + and destroy a failover master netdev and manages the primary and + standby slave netdevs that get registered via the generic failover + infrastructure. This can be used by paravirtual drivers to enable + an alternate low latency datapath. It also enables live migration of + a VM with direct attached VF by failing over to the paravirtual + datapath when the VF is unplugged. 
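A rough sketch of how a paravirtual driver is expected to consume this API (virtio_net, which now selects NET_FAILOVER above, is the in-tree consumer): create the failover instance around the standby netdev at probe time and tear it down on remove. The net_failover_create()/net_failover_destroy() entry points are the ones this series adds; the pv_dev wrapper and the function names below are hypothetical.

	#include <net/net_failover.h>

	/* Hypothetical driver state, for illustration only */
	struct pv_dev {
		struct net_device *netdev;	/* paravirtual (standby) netdev */
		struct failover *failover;
	};

	static int pv_enable_failover(struct pv_dev *pv)
	{
		/* Register our netdev as the standby slave; a VF that shows
		 * up later with a matching MAC address is adopted as the
		 * primary and traffic fails over to it automatically.
		 */
		pv->failover = net_failover_create(pv->netdev);
		return PTR_ERR_OR_ZERO(pv->failover);
	}

	static void pv_disable_failover(struct pv_dev *pv)
	{
		net_failover_destroy(pv->failover);
	}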
+ endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 91e67e375dd4..21cde7e78621 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -78,3 +78,4 @@ obj-$(CONFIG_FUJITSU_ES) += fjes/ thunderbolt-net-y += thunderbolt.o obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o obj-$(CONFIG_NETDEVSIM) += netdevsim/ +obj-$(CONFIG_NET_FAILOVER) += net_failover.o diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 5eb0df2e5464..e82108c917a6 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -40,11 +40,6 @@ #include <net/bonding.h> #include <net/bond_alb.h> - - -static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -}; static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; @@ -420,8 +415,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave) if (assigned_slave) { rx_hash_table[index].slave = assigned_slave; - if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst, - mac_bcast)) { + if (is_valid_ether_addr(rx_hash_table[index].mac_dst)) { bond_info->rx_hashtbl[index].ntt = 1; bond_info->rx_ntt = 1; /* A slave has been removed from the @@ -524,7 +518,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla client_info = &(bond_info->rx_hashtbl[hash_index]); if ((client_info->slave == slave) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; ntt = 1; } @@ -565,7 +559,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip) if ((client_info->ip_src == src_ip) && !ether_addr_equal_64bits(client_info->slave->dev->dev_addr, bond->dev->dev_addr) && - !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond_info->rx_ntt = 1; } @@ -593,7 +587,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon if ((client_info->ip_src == arp->ip_src) && (client_info->ip_dst == arp->ip_dst)) { /* the entry is already assigned to this client */ - if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) { + if (!is_broadcast_ether_addr(arp->mac_dst)) { /* update mac address from arp */ ether_addr_copy(client_info->mac_dst, arp->mac_dst); } @@ -641,7 +635,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon ether_addr_copy(client_info->mac_src, arp->mac_src); client_info->slave = assigned_slave; - if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { + if (is_valid_ether_addr(client_info->mac_dst)) { client_info->ntt = 1; bond->alb_info.rx_ntt = 1; } else { @@ -733,8 +727,10 @@ static void rlb_rebalance(struct bonding *bond) assigned_slave = __rlb_next_rx_slave(bond); if (assigned_slave && (client_info->slave != assigned_slave)) { client_info->slave = assigned_slave; - client_info->ntt = 1; - ntt = 1; + if (!is_zero_ether_addr(client_info->mac_dst)) { + client_info->ntt = 1; + ntt = 1; + } } } @@ -1319,8 +1315,8 @@ void bond_alb_deinitialize(struct bonding *bond) rlb_deinitialize(bond); } -static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, - struct slave *tx_slave) +static netdev_tx_t bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond, + struct slave *tx_slave) { struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); struct ethhdr *eth_data = eth_hdr(skb); @@ -1354,7 +1350,7 @@ out: return 
NETDEV_TX_OK; } -int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) +netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct ethhdr *eth_data; @@ -1392,7 +1388,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) return bond_do_alb_xmit(skb, bond, tx_slave); } -int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) +netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct ethhdr *eth_data; @@ -1412,9 +1408,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) case ETH_P_IP: { const struct iphdr *iph = ip_hdr(skb); - if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) || - (iph->daddr == ip_bcast) || - (iph->protocol == IPPROTO_IGMP)) { + if (is_broadcast_ether_addr(eth_data->h_dest) || + iph->daddr == ip_bcast || + iph->protocol == IPPROTO_IGMP) { do_tx_balance = false; break; } @@ -1426,7 +1422,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) /* IPv6 doesn't really use broadcast mac address, but leave * that here just in case. */ - if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) { + if (is_broadcast_ether_addr(eth_data->h_dest)) { do_tx_balance = false; break; } @@ -1482,8 +1478,24 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) } if (do_tx_balance) { - hash_index = _simple_hash(hash_start, hash_size); - tx_slave = tlb_choose_channel(bond, hash_index, skb->len); + if (bond->params.tlb_dynamic_lb) { + hash_index = _simple_hash(hash_start, hash_size); + tx_slave = tlb_choose_channel(bond, hash_index, skb->len); + } else { + /* + * do_tx_balance means we are free to select the tx_slave + * So we do exactly what tlb would do for hash selection + */ + + struct bond_up_slave *slaves; + unsigned int count; + + slaves = rcu_dereference(bond->slave_arr); + count = slaves ? 
READ_ONCE(slaves->count) : 0; + if (likely(count)) + tx_slave = slaves->arr[bond_xmit_hash(bond, skb) % + count]; + } } return bond_do_alb_xmit(skb, bond, tx_slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f1e97b26f95..bd53a71f6b00 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -159,7 +159,7 @@ module_param(min_links, int, 0); MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier"); module_param(xmit_hash_policy, charp, 0); -MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " +MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; " "0 for layer 2 (default), 1 for layer 3+4, " "2 for layer 2+3, 3 for encap layer 2+3, " "4 for encap layer 3+4"); @@ -247,7 +247,7 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, BUILD_BUG_ON(sizeof(skb->queue_mapping) != sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); if (unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); @@ -1107,7 +1107,8 @@ static void bond_compute_features(struct bonding *bond) done: bond_dev->vlan_features = vlan_features; - bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_GSO_UDP_L4; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -1217,12 +1218,37 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) } } +static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond, + enum netdev_lag_tx_type type) +{ + if (type != NETDEV_LAG_TX_TYPE_HASH) + return NETDEV_LAG_HASH_NONE; + + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + return NETDEV_LAG_HASH_L2; + case BOND_XMIT_POLICY_LAYER34: + return NETDEV_LAG_HASH_L34; + case BOND_XMIT_POLICY_LAYER23: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_ENCAP23: + return NETDEV_LAG_HASH_E23; + case BOND_XMIT_POLICY_ENCAP34: + return NETDEV_LAG_HASH_E34; + default: + return NETDEV_LAG_HASH_UNKNOWN; + } +} + static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, struct netlink_ext_ack *extack) { struct netdev_lag_upper_info lag_upper_info; + enum netdev_lag_tx_type type; - lag_upper_info.tx_type = bond_lag_tx_type(bond); + type = bond_lag_tx_type(bond); + lag_upper_info.tx_type = type; + lag_upper_info.hash_type = bond_lag_hash_type(bond, type); return netdev_master_upper_dev_link(slave->dev, bond->dev, slave, &lag_upper_info, extack); @@ -1735,7 +1761,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, unblock_netpoll_tx(); } - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); bond->nest_level = dev_get_nest_level(bond_dev); @@ -1870,7 +1896,7 @@ static int __bond_release_one(struct net_device *bond_dev, if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave); - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, slave); netdev_info(bond_dev, "Releasing %s interface %s\n", @@ -2137,6 +2163,24 @@ static int bond_miimon_inspect(struct bonding *bond) return commit; } +static void bond_miimon_link_change(struct bonding *bond, + 
struct slave *slave, + char link) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_8023AD: + bond_3ad_handle_link_change(slave, link); + break; + case BOND_MODE_TLB: + case BOND_MODE_ALB: + bond_alb_handle_link_change(bond, slave, link); + break; + case BOND_MODE_XOR: + bond_update_slave_arr(bond, NULL); + break; + } +} + static void bond_miimon_commit(struct bonding *bond) { struct list_head *iter; @@ -2178,16 +2222,7 @@ static void bond_miimon_commit(struct bonding *bond) slave->speed == SPEED_UNKNOWN ? 0 : slave->speed, slave->duplex ? "full" : "half"); - /* notify ad that the link status has changed */ - if (BOND_MODE(bond) == BOND_MODE_8023AD) - bond_3ad_handle_link_change(slave, BOND_LINK_UP); - - if (bond_is_lb(bond)) - bond_alb_handle_link_change(bond, slave, - BOND_LINK_UP); - - if (BOND_MODE(bond) == BOND_MODE_XOR) - bond_update_slave_arr(bond, NULL); + bond_miimon_link_change(bond, slave, BOND_LINK_UP); if (!bond->curr_active_slave || slave == primary) goto do_failover; @@ -2209,16 +2244,7 @@ static void bond_miimon_commit(struct bonding *bond) netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n", slave->dev->name); - if (BOND_MODE(bond) == BOND_MODE_8023AD) - bond_3ad_handle_link_change(slave, - BOND_LINK_DOWN); - - if (bond_is_lb(bond)) - bond_alb_handle_link_change(bond, slave, - BOND_LINK_DOWN); - - if (BOND_MODE(bond) == BOND_MODE_XOR) - bond_update_slave_arr(bond, NULL); + bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); if (slave == rcu_access_pointer(bond->curr_active_slave)) goto do_failover; @@ -3102,7 +3128,7 @@ static int bond_slave_netdev_event(unsigned long event, * events. If these (miimon/arpmon) parameters are configured * then array gets refreshed twice and that should be fine! */ - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); break; case NETDEV_CHANGEMTU: @@ -3322,7 +3348,7 @@ static int bond_open(struct net_device *bond_dev) */ if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) return -ENOMEM; - if (bond->params.tlb_dynamic_lb) + if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) queue_delayed_work(bond->wq, &bond->alb_work, 0); } @@ -3341,7 +3367,7 @@ static int bond_open(struct net_device *bond_dev) bond_3ad_initiate_agg_selection(bond, 1); } - if (bond_mode_uses_xmit_hash(bond)) + if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); return 0; @@ -3807,7 +3833,8 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond) return slave_id; } -static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct iphdr *iph = ip_hdr(skb); @@ -3843,7 +3870,8 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev /* In active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. 
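* Every frame is therefore handed straight to that slave; no hashing or * slave selection takes place in this mode.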
*/ -static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave; @@ -3892,7 +3920,7 @@ err: * to determine the slave interface - * (a) BOND_MODE_8023AD * (b) BOND_MODE_XOR - * (c) BOND_MODE_TLB && tlb_dynamic_lb == 0 + * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0 * * The caller is expected to hold RTNL only and NO other lock! */ @@ -3945,6 +3973,11 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) continue; if (skipslave == slave) continue; + + netdev_dbg(bond->dev, + "Adding slave dev %s to tx hash array[%d]\n", + slave->dev->name, new_arr->count); + new_arr->arr[new_arr->count++] = slave; } @@ -3981,7 +4014,8 @@ out: * usable slave array is formed in the control path. The xmit function * just calculates hash and sends the packet out. */ -static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb, + struct net_device *dev) { struct bonding *bond = netdev_priv(dev); struct slave *slave; @@ -4001,7 +4035,8 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) } /* in broadcast mode, we send everything to all usable interfaces. */ -static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) +static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb, + struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = NULL; @@ -4038,12 +4073,12 @@ static inline int bond_slave_override(struct bonding *bond, struct slave *slave = NULL; struct list_head *iter; - if (!skb->queue_mapping) + if (!skb_rx_queue_recorded(skb)) return 1; /* Find out if any slaves have the same mapping as this skb. */ bond_for_each_slave_rcu(bond, slave, iter) { - if (slave->queue_id == skb->queue_mapping) { + if (slave->queue_id == skb_get_queue_mapping(skb)) { if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { bond_dev_queue_xmit(bond, skb, slave->dev); @@ -4069,7 +4104,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, u16 txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; /* Save the original txq to restore before passing to the driver */ - qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb); if (unlikely(txq >= dev->real_num_tx_queues)) { do { @@ -4259,7 +4294,7 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; bond_dev->features |= bond_dev->hw_features; } @@ -4320,9 +4355,9 @@ static int bond_check_params(struct bond_params *params) } if (xmit_hash_policy) { - if ((bond_mode != BOND_MODE_XOR) && - (bond_mode != BOND_MODE_8023AD) && - (bond_mode != BOND_MODE_TLB)) { + if (bond_mode == BOND_MODE_ROUNDROBIN || + bond_mode == BOND_MODE_ACTIVEBACKUP || + bond_mode == BOND_MODE_BROADCAST) { pr_info("xmit_hash_policy param is irrelevant in mode %s\n", bond_mode_name(bond_mode)); } else { diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 58c705f24f96..8a945c9341d6 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -395,7 +395,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .id = BOND_OPT_TLB_DYNAMIC_LB, .name = "tlb_dynamic_lb", .desc = "Enable dynamic flow shuffling", - .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)), + .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB) | BIT(BOND_MODE_ALB)), .values = bond_tlb_dynamic_lb_tbl, .flags = BOND_OPTFLAG_IFDOWN, .set = bond_option_tlb_dynamic_lb_set, diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 3da5fca77cbd..5e010b1592f7 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -684,7 +684,8 @@ static int b53_switch_reset(struct b53_device *dev) * still use this driver as a library and need to perform the reset * earlier. 
*/ - if (dev->chip_id == BCM58XX_DEVICE_ID) { + if (dev->chip_id == BCM58XX_DEVICE_ID || + dev->chip_id == BCM583XX_DEVICE_ID) { b53_read8(dev, B53_CTRL_PAGE, B53_SOFTRESET, ®); reg |= SW_RST | EN_SW_RST | EN_CH_RST; b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, reg); @@ -806,16 +807,39 @@ static unsigned int b53_get_mib_size(struct b53_device *dev) return B53_MIBS_SIZE; } -void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static struct phy_device *b53_get_phy_device(struct dsa_switch *ds, int port) +{ + /* These ports typically do not have built-in PHYs */ + switch (port) { + case B53_CPU_PORT_25: + case 7: + case B53_CPU_PORT: + return NULL; + } + + return mdiobus_get_phy(ds->slave_mii_bus, port); +} + +void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) { struct b53_device *dev = ds->priv; const struct b53_mib_desc *mibs = b53_get_mib(dev); unsigned int mib_size = b53_get_mib_size(dev); + struct phy_device *phydev; unsigned int i; - for (i = 0; i < mib_size; i++) - strlcpy(data + i * ETH_GSTRING_LEN, - mibs[i].name, ETH_GSTRING_LEN); + if (stringset == ETH_SS_STATS) { + for (i = 0; i < mib_size; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + mibs[i].name, ETH_GSTRING_LEN); + } else if (stringset == ETH_SS_PHY_STATS) { + phydev = b53_get_phy_device(ds, port); + if (!phydev) + return; + + phy_ethtool_get_strings(phydev, data); + } } EXPORT_SYMBOL(b53_get_strings); @@ -852,11 +876,34 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) } EXPORT_SYMBOL(b53_get_ethtool_stats); -int b53_get_sset_count(struct dsa_switch *ds, int port) +void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data) +{ + struct phy_device *phydev; + + phydev = b53_get_phy_device(ds, port); + if (!phydev) + return; + + phy_ethtool_get_stats(phydev, NULL, data); +} +EXPORT_SYMBOL(b53_get_ethtool_phy_stats); + +int b53_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct b53_device *dev = ds->priv; + struct phy_device *phydev; + + if (sset == ETH_SS_STATS) { + return b53_get_mib_size(dev); + } else if (sset == ETH_SS_PHY_STATS) { + phydev = b53_get_phy_device(ds, port); + if (!phydev) + return 0; + + return phy_ethtool_get_sset_count(phydev); + } - return b53_get_mib_size(dev); + return 0; } EXPORT_SYMBOL(b53_get_sset_count); @@ -1477,7 +1524,7 @@ void b53_br_fast_age(struct dsa_switch *ds, int port) } EXPORT_SYMBOL(b53_br_fast_age); -static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) +static bool b53_possible_cpu_port(struct dsa_switch *ds, int port) { /* Broadcom switches will accept enabling Broadcom tags on the * following ports: 5, 7 and 8, any other port is not supported @@ -1489,10 +1536,19 @@ static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) return true; } - dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", port); return false; } +static bool b53_can_enable_brcm_tags(struct dsa_switch *ds, int port) +{ + bool ret = b53_possible_cpu_port(ds, port); + + if (!ret) + dev_warn(ds->dev, "Port %d is not Broadcom tag capable\n", + port); + return ret; +} + enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; @@ -1650,6 +1706,7 @@ static const struct dsa_switch_ops b53_switch_ops = { .get_strings = b53_get_strings, .get_ethtool_stats = b53_get_ethtool_stats, .get_sset_count = b53_get_sset_count, + .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .phy_read = b53_phy_read16, .phy_write = 
b53_phy_write16, .adjust_link = b53_adjust_link, @@ -1880,6 +1937,18 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, { + .chip_id = BCM583XX_DEVICE_ID, + .dev_name = "BCM583xx/11360", + .vlans = 4096, + .enabled_ports = 0x103, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { .chip_id = BCM7445_DEVICE_ID, .dev_name = "BCM7445", .vlans = 4096, @@ -1966,6 +2035,15 @@ static int b53_switch_init(struct b53_device *dev) dev->num_ports = dev->cpu_port + 1; dev->enabled_ports |= BIT(dev->cpu_port); + /* Include non standard CPU port built-in PHYs to be probed */ + if (is539x(dev) || is531x5(dev)) { + for (i = 0; i < dev->num_ports; i++) { + if (!(dev->ds->phys_mii_mask & BIT(i)) && + !b53_possible_cpu_port(dev->ds, i)) + dev->ds->phys_mii_mask |= BIT(i); + } + } + dev->ports = devm_kzalloc(dev->dev, sizeof(struct b53_port) * dev->num_ports, GFP_KERNEL); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 3b57f47d0e79..df149756c282 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -62,6 +62,7 @@ enum { BCM53018_DEVICE_ID = 0x53018, BCM53019_DEVICE_ID = 0x53019, BCM58XX_DEVICE_ID = 0x5800, + BCM583XX_DEVICE_ID = 0x58300, BCM7445_DEVICE_ID = 0x7445, BCM7278_DEVICE_ID = 0x7278, }; @@ -181,6 +182,7 @@ static inline int is5301x(struct b53_device *dev) static inline int is58xx(struct b53_device *dev) { return dev->chip_id == BCM58XX_DEVICE_ID || + dev->chip_id == BCM583XX_DEVICE_ID || dev->chip_id == BCM7445_DEVICE_ID || dev->chip_id == BCM7278_DEVICE_ID; } @@ -287,9 +289,11 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev) /* Exported functions towards other drivers */ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port); int b53_configure_vlan(struct dsa_switch *ds); -void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data); +void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data); void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data); -int b53_get_sset_count(struct dsa_switch *ds, int port); +int b53_get_sset_count(struct dsa_switch *ds, int port, int sset); +void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data); int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index c37ffd1b6833..8247481eaa06 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -364,7 +364,7 @@ static const struct of_device_id b53_srab_of_match[] = { { .compatible = "brcm,bcm53018-srab" }, { .compatible = "brcm,bcm53019-srab" }, { .compatible = "brcm,bcm5301x-srab" }, - { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm11360-srab", .data = (void *)BCM583XX_DEVICE_ID }, { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID }, { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID }, { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID }, @@ -372,7 +372,7 @@ static const struct of_device_id b53_srab_of_match[] = { { .compatible = "brcm,bcm58623-srab", .data = (void 
*)BCM58XX_DEVICE_ID }, { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID }, { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID }, - { .compatible = "brcm,cygnus-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,cygnus-srab", .data = (void *)BCM583XX_DEVICE_ID }, { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID }, { /* sentinel */ }, }; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 0378eded31f2..02e8982519ce 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/mii.h> #include <linux/of.h> #include <linux/of_irq.h> @@ -306,7 +307,8 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { - struct bcm_sf2_priv *priv = dev_id; + struct dsa_switch *ds = dev_id; + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq0_mask; @@ -317,16 +319,21 @@ static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id) { - struct bcm_sf2_priv *priv = dev_id; + struct dsa_switch *ds = dev_id; + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) & ~priv->irq1_mask; intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); - if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) - priv->port_sts[7].link = 1; - if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) - priv->port_sts[7].link = 0; + if (priv->irq1_stat & P_LINK_UP_IRQ(P7_IRQ_OFF)) { + priv->port_sts[7].link = true; + dsa_port_phylink_mac_change(ds, 7, true); + } + if (priv->irq1_stat & P_LINK_DOWN_IRQ(P7_IRQ_OFF)) { + priv->port_sts[7].link = false; + dsa_port_phylink_mac_change(ds, 7, false); + } return IRQ_HANDLED; } @@ -443,12 +450,8 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->slave_mii_bus->parent = ds->dev->parent; priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - if (dn) - err = of_mdiobus_register(priv->slave_mii_bus, dn); - else - err = mdiobus_register(priv->slave_mii_bus); - - if (err) + err = of_mdiobus_register(priv->slave_mii_bus, dn); + if (err && dn) of_node_put(dn); return err; @@ -473,13 +476,56 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) return priv->hw_params.gphy_rev; } -static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_GMII && + state->interface != PHY_INTERFACE_MODE_INTERNAL && + state->interface != PHY_INTERFACE_MODE_MOCA) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + 
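/* For instance, an RGMII port ends up able to advertise Autoneg, + * Pause, Asym_Pause and 10/100/1000 in both duplexes, while MII and + * Reverse MII ports stay capped at 100Mbit/s because the 1000baseT + * bits below remain clear. + */ + 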
if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->dev->ports[port].eee; u32 id_mode_dis = 0, port_mode; - const char *str = NULL; u32 reg, offset; if (priv->type == BCM7445_DEVICE_ID) @@ -487,62 +533,48 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, else offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); - switch (phydev->interface) { + switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: - str = "RGMII (no delay)"; id_mode_dis = 1; + /* fallthrough */ case PHY_INTERFACE_MODE_RGMII_TXID: - if (!str) - str = "RGMII (TX delay)"; port_mode = EXT_GPHY; break; case PHY_INTERFACE_MODE_MII: - str = "MII"; port_mode = EXT_EPHY; break; case PHY_INTERFACE_MODE_REVMII: - str = "Reverse MII"; port_mode = EXT_REVMII; break; default: - /* All other PHYs: internal and MoCA */ + /* all other PHYs: internal and MoCA */ goto force_link; } - /* If the link is down, just disable the interface to conserve power */ - if (!phydev->link) { - reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); - reg &= ~RGMII_MODE_EN; - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); - goto force_link; - } - - /* Clear id_mode_dis bit, and the existing port mode, but - * make sure we enable the RGMII block for data to pass + /* Clear id_mode_dis bit, and the existing port mode, let + * RGMII_MODE_EN be set by mac_link_{up,down} */ reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); reg &= ~ID_MODE_DIS; reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); reg |= port_mode; if (id_mode_dis) reg |= ID_MODE_DIS; - if (phydev->pause) { - if (phydev->asym_pause) + if (state->pause & MLO_PAUSE_TXRX_MASK) { + if (state->pause & MLO_PAUSE_TX) reg |= TX_PAUSE_EN; reg |= RX_PAUSE_EN; } reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); - pr_info("Port %d configured for %s\n", port, str); - force_link: /* Force link settings detected from the PHY */ reg = SW_OVERRIDE; - switch (phydev->speed) { + switch (state->speed) { case SPEED_1000: reg |= SPDSTS_1000 << SPEED_SHIFT; break; @@ -551,33 +583,61 @@ force_link: break; } - if (phydev->link) + if (state->link) reg |= LINK_STS; - if (phydev->duplex == DUPLEX_FULL) + if (state->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; core_writel(priv, reg, offset); - - if (!phydev->is_pseudo_fixed_link) - p->eee_enabled = b53_eee_init(ds, port, phydev); } -static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, - struct fixed_phy_status *status) +static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, + phy_interface_t interface, bool link) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - u32 duplex, pause, offset; u32 reg; - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + if (!phy_interface_mode_is_rgmii(interface) && + interface != PHY_INTERFACE_MODE_MII && + interface != PHY_INTERFACE_MODE_REVMII) + 
return; + + /* If the link is down, just disable the interface to conserve power */ + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + if (link) + reg |= RGMII_MODE_EN; else - offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + reg &= ~RGMII_MODE_EN; + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); +} - duplex = core_readl(priv, CORE_DUPSTS); - pause = core_readl(priv, CORE_PAUSESTS); +static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + bcm_sf2_sw_mac_link_set(ds, port, interface, false); +} - status->link = 0; +static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_eee *p = &priv->dev->ports[port].eee; + + bcm_sf2_sw_mac_link_set(ds, port, interface, true); + + if (mode == MLO_AN_PHY && phydev) + p->eee_enabled = b53_eee_init(ds, port, phydev); +} + +static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port, + struct phylink_link_state *status) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + + status->link = false; /* MoCA port is special as we do not get link status from CORE_LNKSTS, * which means that we need to force the link at the port override @@ -596,28 +656,10 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, */ if (!status->link) netif_carrier_off(ds->ports[port].slave); - status->duplex = 1; + status->duplex = DUPLEX_FULL; } else { - status->link = 1; - status->duplex = !!(duplex & (1 << port)); - } - - reg = core_readl(priv, offset); - reg |= SW_OVERRIDE; - if (status->link) - reg |= LINK_STS; - else - reg &= ~LINK_STS; - core_writel(priv, reg, offset); - - if ((pause & (1 << port)) && - (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { - status->asym_pause = 1; - status->pause = 1; + status->link = true; } - - if (pause & (1 << port)) - status->pause = 1; } static void bcm_sf2_enable_acb(struct dsa_switch *ds) @@ -859,9 +901,13 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .get_strings = b53_get_strings, .get_ethtool_stats = b53_get_ethtool_stats, .get_sset_count = b53_get_sset_count, + .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .adjust_link = bcm_sf2_sw_adjust_link, - .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .phylink_validate = bcm_sf2_sw_validate, + .phylink_mac_config = bcm_sf2_sw_mac_config, + .phylink_mac_link_down = bcm_sf2_sw_mac_link_down, + .phylink_mac_link_up = bcm_sf2_sw_mac_link_up, + .phylink_fixed_state = bcm_sf2_sw_fixed_state, .suspend = bcm_sf2_sw_suspend, .resume = bcm_sf2_sw_resume, .get_wol = bcm_sf2_sw_get_wol, @@ -1064,14 +1110,14 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) bcm_sf2_intr_disable(priv); ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); + "switch_0", ds); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_mdio; } ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); + "switch_1", ds); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); goto out_mdio; diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index f77be9f85cb3..816f34d64736 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -67,7 +67,7 @@ static struct phy_device *phydevs[PHY_MAX_ADDR]; static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds, int port) { 
- dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d\n", __func__, port); return DSA_TAG_PROTO_NONE; } @@ -86,16 +86,23 @@ static int dsa_loop_setup(struct dsa_switch *ds) return 0; } -static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port) +static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS && sset != ETH_SS_PHY_STATS) + return 0; + return __DSA_LOOP_CNT_MAX; } -static void dsa_loop_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static void dsa_loop_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) { struct dsa_loop_priv *ps = ds->priv; unsigned int i; + if (stringset != ETH_SS_STATS && stringset != ETH_SS_PHY_STATS) + return; + for (i = 0; i < __DSA_LOOP_CNT_MAX; i++) memcpy(data + i * ETH_GSTRING_LEN, ps->ports[port].mib[i].name, ETH_GSTRING_LEN); @@ -117,8 +124,6 @@ static int dsa_loop_phy_read(struct dsa_switch *ds, int port, int regnum) struct mii_bus *bus = ps->bus; int ret; - dev_dbg(ds->dev, "%s\n", __func__); - ret = mdiobus_read_nested(bus, ps->port_base + port, regnum); if (ret < 0) ps->ports[port].mib[DSA_LOOP_PHY_READ_ERR].val++; @@ -135,8 +140,6 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, struct mii_bus *bus = ps->bus; int ret; - dev_dbg(ds->dev, "%s\n", __func__); - ret = mdiobus_write_nested(bus, ps->port_base + port, regnum, value); if (ret < 0) ps->ports[port].mib[DSA_LOOP_PHY_WRITE_ERR].val++; @@ -149,7 +152,8 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port, static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", + __func__, port, bridge->name); return 0; } @@ -157,19 +161,22 @@ static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port, static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *bridge) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n", + __func__, port, bridge->name); } static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, state: %d\n", + __func__, port, state); } static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n", + __func__, port, vlan_filtering); return 0; } @@ -181,7 +188,8 @@ dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, struct dsa_loop_priv *ps = ds->priv; struct mii_bus *bus = ps->bus; - dev_dbg(ds->dev, "%s\n", __func__); + dev_dbg(ds->dev, "%s: port: %d, vlan: %d-%d", + __func__, port, vlan->vid_begin, vlan->vid_end); /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -202,8 +210,6 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, struct dsa_loop_vlan *vl; u16 vid; - dev_dbg(ds->dev, "%s\n", __func__); - /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -215,6 +221,9 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, vl->untagged |= BIT(port); else vl->untagged &= ~BIT(port); + + dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n", + __func__, port, vid, untagged ? 
"un" : "", pvid); } if (pvid) @@ -230,8 +239,6 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, struct dsa_loop_vlan *vl; u16 vid, pvid = ps->pvid; - dev_dbg(ds->dev, "%s\n", __func__); - /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -244,6 +251,9 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, if (pvid == vid) pvid = 1; + + dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n", + __func__, port, vid, untagged ? "un" : "", pvid); } ps->pvid = pvid; @@ -256,6 +266,7 @@ static const struct dsa_switch_ops dsa_loop_driver = { .get_strings = dsa_loop_get_strings, .get_ethtool_stats = dsa_loop_get_ethtool_stats, .get_sset_count = dsa_loop_get_sset_count, + .get_ethtool_phy_stats = dsa_loop_get_ethtool_stats, .phy_read = dsa_loop_phy_read, .phy_write = dsa_loop_phy_write, .port_bridge_join = dsa_loop_port_bridge_join, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index fefa454f3e56..b4f6e1a67dd9 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -977,10 +977,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = { { .offset = LAN9303_MAC_TX_LATECOL_0, .name = "TxLateCol", }, }; -static void lan9303_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +static void lan9303_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) { unsigned int u; + if (stringset != ETH_SS_STATS) + return; + for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name, ETH_GSTRING_LEN); @@ -1007,8 +1011,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, } } -static int lan9303_get_sset_count(struct dsa_switch *ds, int port) +static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(lan9303_mib); } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index bcb3e6c734f2..7210c49b7922 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -439,15 +439,22 @@ static void ksz_disable_port(struct dsa_switch *ds, int port, ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); } -static int ksz_sset_count(struct dsa_switch *ds, int port) +static int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return TOTAL_SWITCH_COUNTER_NUM; } -static void ksz_get_strings(struct dsa_switch *ds, int port, uint8_t *buf) +static void ksz_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, ETH_GSTRING_LEN); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 80a4dbc3a499..62e486652e62 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -573,10 +573,14 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum, } static void -mt7530_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < ARRAY_SIZE(mt7530_mib); i++) strncpy(data + i * ETH_GSTRING_LEN, mt7530_mib[i].name, ETH_GSTRING_LEN); @@ -604,8 +608,11 @@ 
mt7530_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -mt7530_get_sset_count(struct dsa_switch *ds, int port) +mt7530_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(mt7530_mib); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5b4374f21d76..437cd6eb4faa 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -28,9 +28,11 @@ #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> +#include <linux/platform_data/mv88e6xxx.h> #include <linux/netdevice.h> #include <linux/gpio/consumer.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <net/dsa.h> #include "chip.h" @@ -580,6 +582,83 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to configure MAC\n", port); } +static void mv88e6xxx_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ +} + +static int mv88e6xxx_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_link_state(chip, port, state); + mutex_unlock(&chip->reg_lock); + + return err; +} + +static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int speed, duplex, link, err; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + link = LINK_FORCED_UP; + speed = state->speed; + duplex = state->duplex; + } else { + speed = SPEED_UNFORCED; + duplex = DUPLEX_UNFORCED; + link = LINK_UNFORCED; + } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, + state->interface); + mutex_unlock(&chip->reg_lock); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, "p%d: failed to configure MAC\n", port); +} + +static void mv88e6xxx_mac_link_force(struct dsa_switch *ds, int port, int link) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = chip->info->ops->port_set_link(chip, port, link); + mutex_unlock(&chip->reg_lock); + + if (err) + dev_err(chip->dev, "p%d: failed to force MAC link\n", port); +} + +static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + if (mode == MLO_AN_FIXED) + mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_DOWN); +} + +static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface, + struct phy_device *phydev) +{ + if (mode == MLO_AN_FIXED) + mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP); +} + static int mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) { if (!chip->info->ops->stats_snapshot) @@ -665,13 +744,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, case STATS_TYPE_PORT: err = mv88e6xxx_port_read(chip, port, s->reg, ®); if (err) - return UINT64_MAX; + return U64_MAX; low = reg; if (s->size == 4) { err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®); if (err) - return UINT64_MAX; + return U64_MAX; high = reg; } break; @@ -685,7 +764,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, mv88e6xxx_g1_stats_read(chip, reg + 1, &high); break; default: - return UINT64_MAX; + return U64_MAX; } value = (((u64)high) << 16) | low; 
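/* e.g. a 32-bit counter read as two 16-bit halves, high = 0x0001 and * low = 0x86a0, combines to (0x0001 << 16) | 0x86a0 = 0x000186a0, i.e. * 100000 frames. */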
return value; @@ -742,11 +821,14 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data) } static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, - uint8_t *data) + u32 stringset, uint8_t *data) { struct mv88e6xxx_chip *chip = ds->priv; int count = 0; + if (stringset != ETH_SS_STATS) + return; + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_strings) @@ -789,12 +871,15 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip) STATS_TYPE_BANK1); } -static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port) +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct mv88e6xxx_chip *chip = ds->priv; int serdes_count = 0; int count = 0; + if (sset != ETH_SS_STATS) + return 0; + mutex_lock(&chip->reg_lock); if (chip->info->ops->stats_get_sset_count) count = chip->info->ops->stats_get_sset_count(chip); @@ -911,14 +996,6 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, } -static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip) -{ - if (chip->info->ops->stats_set_histogram) - return chip->info->ops->stats_set_histogram(chip); - - return 0; -} - static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) { return 32 * sizeof(u16); @@ -1020,6 +1097,76 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_pri_setup(struct mv88e6xxx_chip *chip) +{ + int err; + + if (chip->info->ops->ieee_pri_map) { + err = chip->info->ops->ieee_pri_map(chip); + if (err) + return err; + } + + if (chip->info->ops->ip_pri_map) { + err = chip->info->ops->ip_pri_map(chip); + if (err) + return err; + } + + return 0; +} + +static int mv88e6xxx_devmap_setup(struct mv88e6xxx_chip *chip) +{ + int target, port; + int err; + + if (!chip->info->global2_addr) + return 0; + + /* Initialize the routing port to the 32 possible target devices */ + for (target = 0; target < 32; target++) { + port = 0x1f; + if (target < DSA_MAX_SWITCHES) + if (chip->ds->rtable[target] != DSA_RTABLE_NONE) + port = chip->ds->rtable[target]; + + err = mv88e6xxx_g2_device_mapping_write(chip, target, port); + if (err) + return err; + } + + if (chip->info->ops->set_cascade_port) { + port = MV88E6XXX_CASCADE_PORT_MULTIPLE; + err = chip->info->ops->set_cascade_port(chip, port); + if (err) + return err; + } + + err = mv88e6xxx_g1_set_device_number(chip, chip->ds->index); + if (err) + return err; + + return 0; +} + +static int mv88e6xxx_trunk_setup(struct mv88e6xxx_chip *chip) +{ + /* Clear all trunk masks and mapping */ + if (chip->info->global2_addr) + return mv88e6xxx_g2_trunk_clear(chip); + + return 0; +} + +static int mv88e6xxx_rmu_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->rmu_disable) + return chip->info->ops->rmu_disable(chip); + + return 0; +} + static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) { if (chip->info->ops->pot_clear) @@ -2113,53 +2260,16 @@ static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, return err; } -static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) { - struct dsa_switch *ds = chip->ds; int err; - /* Disable remote management, and set the switch's DSA device number. */ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, - MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE | - (ds->index & 0x1f)); - if (err) - return err; - - /* Configure the IP ToS mapping registers. 
*/ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff); - if (err) - return err; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_7, 0xffff); - if (err) - return err; - - /* Configure the IEEE 802.1p priority mapping register. */ - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41); - if (err) - return err; - /* Initialize the statistics unit */ - err = mv88e6xxx_stats_set_histogram(chip); - if (err) - return err; + if (chip->info->ops->stats_set_histogram) { + err = chip->info->ops->stats_set_histogram(chip); + if (err) + return err; + } return mv88e6xxx_g1_stats_clear(chip); } @@ -2185,18 +2295,6 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; } - /* Setup Switch Global 1 Registers */ - err = mv88e6xxx_g1_setup(chip); - if (err) - goto unlock; - - /* Setup Switch Global 2 Registers */ - if (chip->info->global2_addr) { - err = mv88e6xxx_g2_setup(chip); - if (err) - goto unlock; - } - err = mv88e6xxx_irl_setup(chip); if (err) goto unlock; @@ -2229,10 +2327,26 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_rmu_setup(chip); + if (err) + goto unlock; + err = mv88e6xxx_rsvd2cpu_setup(chip); if (err) goto unlock; + err = mv88e6xxx_trunk_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_devmap_setup(chip); + if (err) + goto unlock; + + err = mv88e6xxx_pri_setup(chip); + if (err) + goto unlock; + /* Setup PTP Hardware Clock and timestamping */ if (chip->info->ptp_support) { err = mv88e6xxx_ptp_setup(chip); @@ -2244,6 +2358,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; } + err = mv88e6xxx_stats_setup(chip); + if (err) + goto unlock; + unlock: mutex_unlock(&chip->reg_lock); @@ -2337,10 +2455,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, return err; } - if (np) - err = of_mdiobus_register(bus, np); - else - err = mdiobus_register(bus); + err = of_mdiobus_register(bus, np); if (err) { dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); mv88e6xxx_g2_irq_mdio_free(chip, bus); @@ -2460,6 +2575,8 @@ static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, static const struct mv88e6xxx_ops mv88e6085_ops = { /* MV88E6XXX_FAMILY_6097 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, @@ -2488,12 +2605,16 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, + .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6095_ops = { /* MV88E6XXX_FAMILY_6095 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = 
mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -2518,6 +2639,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { static const struct mv88e6xxx_ops mv88e6097_ops = { /* MV88E6XXX_FAMILY_6097 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2545,12 +2668,15 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6085_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, }; static const struct mv88e6xxx_ops mv88e6123_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2579,6 +2705,8 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { static const struct mv88e6xxx_ops mv88e6131_ops = { /* MV88E6XXX_FAMILY_6185 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -2603,6 +2731,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, + .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, @@ -2611,6 +2740,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { static const struct mv88e6xxx_ops mv88e6141_ops = { /* MV88E6XXX_FAMILY_6341 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -2648,6 +2779,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { static const struct mv88e6xxx_ops mv88e6161_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2681,6 +2814,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { static const struct mv88e6xxx_ops mv88e6165_ops = { /* MV88E6XXX_FAMILY_6165 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6165_phy_read, @@ -2707,6 +2842,8 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { static const struct mv88e6xxx_ops mv88e6171_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2741,6 +2878,8 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { static const struct mv88e6xxx_ops mv88e6172_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = 
mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -2771,6 +2910,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -2779,6 +2919,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { static const struct mv88e6xxx_ops mv88e6175_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -2809,10 +2951,13 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .serdes_power = mv88e6341_serdes_power, }; static const struct mv88e6xxx_ops mv88e6176_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -2843,6 +2988,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -2851,6 +2997,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { static const struct mv88e6xxx_ops mv88e6185_ops = { /* MV88E6XXX_FAMILY_6185 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .set_switch_mac = mv88e6xxx_g1_set_switch_mac, .phy_read = mv88e6185_phy_ppu_read, .phy_write = mv88e6185_phy_ppu_write, @@ -2870,6 +3018,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, + .set_cascade_port = mv88e6185_g1_set_cascade_port, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2907,6 +3056,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -2943,6 +3093,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -2979,6 +3130,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -2986,6 +3138,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { static const 
struct mv88e6xxx_ops mv88e6240_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3016,6 +3170,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -3054,6 +3209,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3063,6 +3219,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { static const struct mv88e6xxx_ops mv88e6320_ops = { /* MV88E6XXX_FAMILY_6320 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3099,6 +3257,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { static const struct mv88e6xxx_ops mv88e6321_ops = { /* MV88E6XXX_FAMILY_6320 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3133,6 +3293,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { static const struct mv88e6xxx_ops mv88e6341_ops = { /* MV88E6XXX_FAMILY_6341 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom8, .set_eeprom = mv88e6xxx_g2_set_eeprom8, @@ -3171,6 +3333,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { static const struct mv88e6xxx_ops mv88e6350_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -3205,6 +3369,8 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { static const struct mv88e6xxx_ops mv88e6351_ops = { /* MV88E6XXX_FAMILY_6351 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, @@ -3240,6 +3406,8 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { static const struct mv88e6xxx_ops mv88e6352_ops = { /* MV88E6XXX_FAMILY_6352 */ + .ieee_pri_map = mv88e6085_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, @@ -3270,6 +3438,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6352_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, @@ -3313,6 +3482,7 
@@ static const struct mv88e6xxx_ops mv88e6390_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -3353,6 +3523,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4125,6 +4296,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_tag_protocol = mv88e6xxx_get_tag_protocol, .setup = mv88e6xxx_setup, .adjust_link = mv88e6xxx_adjust_link, + .phylink_validate = mv88e6xxx_validate, + .phylink_mac_link_state = mv88e6xxx_link_state, + .phylink_mac_config = mv88e6xxx_mac_config, + .phylink_mac_link_down = mv88e6xxx_mac_link_down, + .phylink_mac_link_up = mv88e6xxx_mac_link_up, .get_strings = mv88e6xxx_get_strings, .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, @@ -4175,6 +4351,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip) return -ENOMEM; ds->priv = chip; + ds->dev = dev; ds->ops = &mv88e6xxx_switch_ops; ds->ageing_time_min = chip->info->age_time_coeff; ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX; @@ -4189,42 +4366,85 @@ static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) dsa_unregister_switch(chip->ds); } +static const void *pdata_device_get_match_data(struct device *dev) +{ + const struct of_device_id *matches = dev->driver->of_match_table; + const struct dsa_mv88e6xxx_pdata *pdata = dev->platform_data; + + for (; matches->name[0] || matches->type[0] || matches->compatible[0]; + matches++) { + if (!strcmp(pdata->compatible, matches->compatible)) + return matches->data; + } + return NULL; +} + static int mv88e6xxx_probe(struct mdio_device *mdiodev) { + struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; + const struct mv88e6xxx_info *compat_info = NULL; struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node; - const struct mv88e6xxx_info *compat_info; struct mv88e6xxx_chip *chip; - u32 eeprom_len; + int port; int err; - compat_info = of_device_get_match_data(dev); + if (!np && !pdata) + return -EINVAL; + + if (np) + compat_info = of_device_get_match_data(dev); + + if (pdata) { + compat_info = pdata_device_get_match_data(dev); + + if (!pdata->netdev) + return -EINVAL; + + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(pdata->enabled_ports & (1 << port))) + continue; + if (strcmp(pdata->cd.port_names[port], "cpu")) + continue; + pdata->cd.netdev[port] = &pdata->netdev->dev; + break; + } + } + if (!compat_info) return -EINVAL; chip = mv88e6xxx_alloc_chip(dev); - if (!chip) - return -ENOMEM; + if (!chip) { + err = -ENOMEM; + goto out; + } chip->info = compat_info; err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); if (err) - return err; + goto out; chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(chip->reset)) - return PTR_ERR(chip->reset); + if (IS_ERR(chip->reset)) { + err = PTR_ERR(chip->reset); + goto out; + } err = mv88e6xxx_detect(chip); if (err) - return err; + goto out; mv88e6xxx_phy_init(chip); - if (chip->info->ops->get_eeprom && - !of_property_read_u32(np, "eeprom-length", 
&eeprom_len)) - chip->eeprom_len = eeprom_len; + if (chip->info->ops->get_eeprom) { + if (np) + of_property_read_u32(np, "eeprom-length", + &chip->eeprom_len); + else + chip->eeprom_len = pdata->eeprom_len; + } mutex_lock(&chip->reg_lock); err = mv88e6xxx_switch_reset(chip); @@ -4293,6 +4513,9 @@ out_g1_irq: mv88e6xxx_irq_poll_free(chip); mutex_unlock(&chip->reg_lock); out: + if (pdata) + dev_put(pdata->netdev); + return err; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 12b7f4649b25..8ac3fbb15352 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -21,10 +21,6 @@ #include <linux/timecounter.h> #include <net/dsa.h> -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif - #define SMI_CMD 0x00 #define SMI_CMD_BUSY BIT(15) #define SMI_CMD_CLAUSE_22 BIT(12) @@ -242,7 +238,7 @@ struct mv88e6xxx_chip { struct gpio_desc *reset; /* set to size of eeprom if supported by the switch */ - int eeprom_len; + u32 eeprom_len; /* List of mdio busses */ struct list_head mdios; @@ -298,6 +294,9 @@ struct mv88e6xxx_mdio_bus { }; struct mv88e6xxx_ops { + int (*ieee_pri_map)(struct mv88e6xxx_chip *chip); + int (*ip_pri_map)(struct mv88e6xxx_chip *chip); + /* Ingress Rate Limit unit (IRL) operations */ int (*irl_init_all)(struct mv88e6xxx_chip *chip, int port); @@ -406,6 +405,12 @@ struct mv88e6xxx_ops { uint64_t *data); int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port); int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); + +#define MV88E6XXX_CASCADE_PORT_NONE 0xe +#define MV88E6XXX_CASCADE_PORT_MULTIPLE 0xf + + int (*set_cascade_port)(struct mv88e6xxx_chip *chip, int port); + const struct mv88e6xxx_irq_ops *watchdog_ops; int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); @@ -431,6 +436,9 @@ struct mv88e6xxx_ops { /* Interface to the AVB/PTP registers */ const struct mv88e6xxx_avb_ops *avb_ops; + + /* Remote Management Unit operations */ + int (*rmu_disable)(struct mv88e6xxx_chip *chip); }; struct mv88e6xxx_irq_ops { diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index b43bd6476632..d721ccf7d8be 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -241,6 +241,64 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip) return mv88e6185_g1_wait_ppu_disabled(chip); } +/* Offset 0x10: IP-PRI Mapping Register 0 + * Offset 0x11: IP-PRI Mapping Register 1 + * Offset 0x12: IP-PRI Mapping Register 2 + * Offset 0x13: IP-PRI Mapping Register 3 + * Offset 0x14: IP-PRI Mapping Register 4 + * Offset 0x15: IP-PRI Mapping Register 5 + * Offset 0x16: IP-PRI Mapping Register 6 + * Offset 0x17: IP-PRI Mapping Register 7 + */ + +int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip) +{ + int err; + + /* Reset the IP TOS/DiffServ/Traffic priorities to defaults */ + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_0, 0x0000); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_1, 0x0000); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_2, 0x5555); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_3, 0x5555); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_4, 0xaaaa); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_5, 0xaaaa); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IP_PRI_6, 0xffff); + if (err) + return err; + + err = mv88e6xxx_g1_write(chip, 
MV88E6XXX_G1_IP_PRI_7, 0xffff); + if (err) + return err; + + return 0; +} + +/* Offset 0x18: IEEE-PRI Register */ + +int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip) +{ + /* Reset the IEEE Tag priorities to defaults */ + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_IEEE_PRI, 0xfa41); +} + /* Offset 0x1a: Monitor Control */ /* Offset 0x1a: Monitor & MGMT Control on some devices */ @@ -350,20 +408,59 @@ int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) /* Offset 0x1c: Global Control 2 */ -int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_g1_ctl2_mask(struct mv88e6xxx_chip *chip, u16 mask, + u16 val) { - u16 val; + u16 reg; int err; - err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &val); + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL2, &reg); if (err) return err; - val |= MV88E6XXX_G1_CTL2_HIST_RX_TX; + reg &= ~mask; + reg |= val & mask; - err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, val); + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL2, reg); +} - return err; +int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port) +{ + const u16 mask = MV88E6185_G1_CTL2_CASCADE_PORT_MASK; + + return mv88e6xxx_g1_ctl2_mask(chip, mask, port << __bf_shf(mask)); +} + +int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6085_G1_CTL2_P10RM | + MV88E6085_G1_CTL2_RM_ENABLE, 0); +} + +int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6352_G1_CTL2_RMU_MODE_MASK, + MV88E6352_G1_CTL2_RMU_MODE_DISABLED); +} + +int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK, + MV88E6390_G1_CTL2_RMU_MODE_DISABLED); +} + +int mv88e6390_g1_stats_set_histogram(struct mv88e6xxx_chip *chip) +{ + return mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_HIST_MODE_MASK, + MV88E6390_G1_CTL2_HIST_MODE_RX | + MV88E6390_G1_CTL2_HIST_MODE_TX); +} + +int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index) +{ + return mv88e6xxx_g1_ctl2_mask(chip, + MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK, + index); } /* Offset 0x1d: Statistics Operation 2 */ diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 6aee7316fea6..7c791c1da4b9 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -201,11 +201,35 @@ /* Offset 0x1C: Global Control 2 */ #define MV88E6XXX_G1_CTL2 0x1c -#define MV88E6XXX_G1_CTL2_NO_CASCADE 0xe000 -#define MV88E6XXX_G1_CTL2_MULTIPLE_CASCADE 0xf000 -#define MV88E6XXX_G1_CTL2_HIST_RX 0x0040 -#define MV88E6XXX_G1_CTL2_HIST_TX 0x0080 -#define MV88E6XXX_G1_CTL2_HIST_RX_TX 0x00c0 +#define MV88E6185_G1_CTL2_CASCADE_PORT_MASK 0xf000 +#define MV88E6185_G1_CTL2_CASCADE_PORT_NONE 0xe000 +#define MV88E6185_G1_CTL2_CASCADE_PORT_MULTI 0xf000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_MASK 0xc000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_ORIG 0x0000 +#define MV88E6352_G1_CTL2_HEADER_TYPE_MGMT 0x4000 +#define MV88E6390_G1_CTL2_HEADER_TYPE_LAG 0x8000 +#define MV88E6352_G1_CTL2_RMU_MODE_MASK 0x3000 +#define MV88E6352_G1_CTL2_RMU_MODE_DISABLED 0x0000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_4 0x1000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_5 0x2000 +#define MV88E6352_G1_CTL2_RMU_MODE_PORT_6 0x3000 +#define MV88E6085_G1_CTL2_DA_CHECK 0x4000 +#define MV88E6085_G1_CTL2_P10RM 0x2000 +#define MV88E6085_G1_CTL2_RM_ENABLE 0x1000 +#define MV88E6352_G1_CTL2_DA_CHECK 0x0800 +#define MV88E6390_G1_CTL2_RMU_MODE_MASK 0x0700
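/* Bits 10:8 of Global 1 Control 2 hold the 6390 RMU mode. The values
 * below pair with the mv88e6xxx_g1_ctl2_mask() read-modify-write helper
 * added above. As a minimal sketch (hypothetical; the patch itself only
 * ever disables the RMU), pointing the Remote Management Unit at port 9
 * would look like:
 *
 *	err = mv88e6xxx_g1_ctl2_mask(chip, MV88E6390_G1_CTL2_RMU_MODE_MASK,
 *				     MV88E6390_G1_CTL2_RMU_MODE_PORT_9);
 *
 * with chip->reg_lock held, as the callers in chip.c do.
 */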
+#define MV88E6390_G1_CTL2_RMU_MODE_PORT_0 0x0000 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_1 0x0100 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_9 0x0200 +#define MV88E6390_G1_CTL2_RMU_MODE_PORT_10 0x0300 +#define MV88E6390_G1_CTL2_RMU_MODE_ALL_DSA 0x0600 +#define MV88E6390_G1_CTL2_RMU_MODE_DISABLED 0x0700 +#define MV88E6390_G1_CTL2_HIST_MODE_MASK 0x00c0 +#define MV88E6390_G1_CTL2_HIST_MODE_RX 0x0040 +#define MV88E6390_G1_CTL2_HIST_MODE_TX 0x0080 +#define MV88E6352_G1_CTL2_CTR_MODE_MASK 0x0060 +#define MV88E6390_G1_CTL2_CTR_MODE 0x0020 +#define MV88E6XXX_G1_CTL2_DEVICE_NUMBER_MASK 0x001f /* Offset 0x1D: Stats Operation Register */ #define MV88E6XXX_G1_STATS_OP 0x1d @@ -253,6 +277,17 @@ int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip); +int mv88e6085_g1_ieee_pri_map(struct mv88e6xxx_chip *chip); + +int mv88e6185_g1_set_cascade_port(struct mv88e6xxx_chip *chip, int port); + +int mv88e6085_g1_rmu_disable(struct mv88e6xxx_chip *chip); +int mv88e6352_g1_rmu_disable(struct mv88e6xxx_chip *chip); +int mv88e6390_g1_rmu_disable(struct mv88e6xxx_chip *chip); + +int mv88e6xxx_g1_set_device_number(struct mv88e6xxx_chip *chip, int index); + int mv88e6xxx_g1_atu_set_learn2all(struct mv88e6xxx_chip *chip, bool learn2all); int mv88e6xxx_g1_atu_set_age_time(struct mv88e6xxx_chip *chip, unsigned int msecs); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 8d22d66d84b7..91a3cb2452ac 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -119,37 +119,17 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) /* Offset 0x06: Device Mapping Table register */ -static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, - int target, int port) +int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, + int port) { - u16 val = (target << 8) | (port & 0xf); + u16 val = (target << 8) | (port & 0x1f); + /* Modern chips use 5 bits to define a device mapping port, + * but bit 4 is reserved on older chips, so it is safe to use. 
+ */ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_DEVICE_MAPPING, val); } -static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip) -{ - int target, port; - int err; - - /* Initialize the routing port to the 32 possible target devices */ - for (target = 0; target < 32; ++target) { - port = 0xf; - - if (target < DSA_MAX_SWITCHES) { - port = chip->ds->rtable[target]; - if (port == DSA_RTABLE_NONE) - port = 0xf; - } - - err = mv88e6xxx_g2_device_mapping_write(chip, target, port); - if (err) - break; - } - - return err; -} - /* Offset 0x07: Trunk Mask Table register */ static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, @@ -174,7 +154,7 @@ static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id, return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_TRUNK_MAPPING, val); } -static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) { const u16 port_mask = BIT(mv88e6xxx_num_ports(chip)) - 1; int i, err; @@ -1067,9 +1047,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { int err, irq, virq; - if (!chip->dev->of_node) - return -EINVAL; - chip->g2_irq.domain = irq_domain_add_simple( chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip); if (!chip->g2_irq.domain) @@ -1138,31 +1115,3 @@ void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, for (phy = 0; phy < chip->info->num_internal_phys; phy++) irq_dispose_mapping(bus->irq[phy]); } - -int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) -{ - u16 reg; - int err; - - /* Ignore removed tag data on doubly tagged packets, disable - * flow control messages, force flow control priority to the - * highest, and send all special multicast frames to the CPU - * port at the highest priority. - */ - reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4); - err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg); - if (err) - return err; - - /* Program the DSA routing table. */ - err = mv88e6xxx_g2_set_device_mapping(chip); - if (err) - return err; - - /* Clear all trunk masks and mapping. 
*/ - err = mv88e6xxx_g2_clear_trunk(chip); - if (err) - return err; - - return 0; -} diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 520ec70d32e8..37e8ce2c72a0 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -60,7 +60,8 @@ #define MV88E6XXX_G2_DEVICE_MAPPING 0x06 #define MV88E6XXX_G2_DEVICE_MAPPING_UPDATE 0x8000 #define MV88E6XXX_G2_DEVICE_MAPPING_DEV_MASK 0x1f00 -#define MV88E6XXX_G2_DEVICE_MAPPING_PORT_MASK 0x000f +#define MV88E6352_G2_DEVICE_MAPPING_PORT_MASK 0x000f +#define MV88E6390_G2_DEVICE_MAPPING_PORT_MASK 0x001f /* Offset 0x07: Trunk Mask Table Register */ #define MV88E6XXX_G2_TRUNK_MASK 0x07 @@ -313,7 +314,6 @@ int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, int src_port, u16 data); int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); -int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); @@ -327,6 +327,11 @@ int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip); + +int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target, + int port); + extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -441,11 +446,6 @@ static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) return -EOPNOTSUPP; } -static inline int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) -{ - return -EOPNOTSUPP; -} - static inline int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; @@ -495,6 +495,17 @@ static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip, return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, + int target, int port) +{ + return -EOPNOTSUPP; +} + #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */ #endif /* _MV88E6XXX_GLOBAL2_H */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 6315774d72b3..429d0ebcd5b1 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -15,6 +15,7 @@ #include <linux/bitfield.h> #include <linux/if_bridge.h> #include <linux/phy.h> +#include <linux/phylink.h> #include "chip.h" #include "port.h" @@ -378,6 +379,44 @@ int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) return 0; } +int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state) +{ + int err; + u16 reg; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); + if (err) + return err; + + switch (reg & MV88E6XXX_PORT_STS_SPEED_MASK) { + case MV88E6XXX_PORT_STS_SPEED_10: + state->speed = SPEED_10; + break; + case MV88E6XXX_PORT_STS_SPEED_100: + state->speed = SPEED_100; + break; + case MV88E6XXX_PORT_STS_SPEED_1000: + state->speed = SPEED_1000; + break; + case MV88E6XXX_PORT_STS_SPEED_10000: + if ((reg & MV88E6XXX_PORT_STS_CMODE_MASK) == + MV88E6XXX_PORT_STS_CMODE_2500BASEX) + state->speed = SPEED_2500; + else + state->speed = SPEED_10000; + break; + } + + state->duplex = reg & MV88E6XXX_PORT_STS_DUPLEX ? 
+ DUPLEX_FULL : DUPLEX_HALF; + state->link = !!(reg & MV88E6XXX_PORT_STS_LINK); + state->an_enabled = 1; + state->an_complete = state->link; + + return 0; +} + /* Offset 0x02: Jamming Control * * Do not limit the period of time that this port can be paused for by diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index b16d5f0e6e9c..5e1db1b221ca 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -29,6 +29,7 @@ #define MV88E6XXX_PORT_STS_SPEED_10 0x0000 #define MV88E6XXX_PORT_STS_SPEED_100 0x0100 #define MV88E6XXX_PORT_STS_SPEED_1000 0x0200 +#define MV88E6XXX_PORT_STS_SPEED_10000 0x0300 #define MV88E6352_PORT_STS_EEE 0x0040 #define MV88E6165_PORT_STS_AM_DIS 0x0040 #define MV88E6185_PORT_STS_MGMII 0x0040 @@ -295,6 +296,8 @@ int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); +int mv88e6xxx_port_link_state(struct mv88e6xxx_chip *chip, int port, + struct phylink_link_state *state); int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port); int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port, int upstream_port); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index fb058fd35c0d..880b2cf0a530 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -326,3 +326,23 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) return 0; } + +int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) +{ + int err; + u8 cmode; + + if (port != 5) + return 0; + + err = mv88e6xxx_port_get_cmode(chip, port, &cmode); + if (err) + return err; + + if (cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X || + cmode == MV88E6XXX_PORT_STS_CMODE_SGMII || + cmode == MV88E6XXX_PORT_STS_CMODE_2500BASEX) + return mv88e6390_serdes_sgmii(chip, MV88E6341_ADDR_SERDES, on); + + return 0; +} diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 1897c01c6e19..b6e5fbd46b5e 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -19,6 +19,8 @@ #define MV88E6352_ADDR_SERDES 0x0f #define MV88E6352_SERDES_PAGE_FIBER 0x01 +#define MV88E6341_ADDR_SERDES 0x15 + #define MV88E6390_PORT9_LANE0 0x09 #define MV88E6390_PORT9_LANE1 0x12 #define MV88E6390_PORT9_LANE2 0x13 @@ -42,6 +44,7 @@ #define MV88E6390_SGMII_CONTROL_LOOPBACK BIT(14) #define MV88E6390_SGMII_CONTROL_PDOWN BIT(11) +int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 600d5ad1fbde..cdcde7f8e0b2 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1,17 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2015, The Linux Foundation. All rights reserved. 
* Copyright (c) 2016 John Crispin <john@phrozen.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include <linux/module.h> @@ -473,10 +465,10 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) static void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) { - u32 mask = QCA8K_PORT_STATUS_TXMAC; + u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; /* Port 0 and 6 have no internal PHY */ - if ((port > 0) && (port < 6)) + if (port > 0 && port < 6) mask |= QCA8K_PORT_STATUS_LINK_AUTO; if (enable) @@ -490,6 +482,7 @@ qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; int ret, i, phy_mode = -1; + u32 mask; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -515,7 +508,10 @@ qca8k_setup(struct dsa_switch *ds) if (ret < 0) return ret; - /* Enable CPU Port */ + /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ + mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | + QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX; + qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); @@ -583,6 +579,47 @@ qca8k_setup(struct dsa_switch *ds) return 0; } +static void +qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + /* Force fixed-link setting for CPU port, skip others. 
*/ + if (!phy_is_pseudo_fixed_link(phy)) + return; + + /* Set port speed */ + switch (phy->speed) { + case 10: + reg = QCA8K_PORT_STATUS_SPEED_10; + break; + case 100: + reg = QCA8K_PORT_STATUS_SPEED_100; + break; + case 1000: + reg = QCA8K_PORT_STATUS_SPEED_1000; + break; + default: + dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", + port, phy->speed); + return; + } + + /* Set duplex mode */ + if (phy->duplex == DUPLEX_FULL) + reg |= QCA8K_PORT_STATUS_DUPLEX; + + /* Force flow control */ + if (dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; + + /* Force link down before changing MAC options */ + qca8k_port_set_status(priv, port, 0); + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); + qca8k_port_set_status(priv, port, 1); +} + static int qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) { @@ -600,10 +637,13 @@ qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) } static void -qca8k_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) { int i; + if (stringset != ETH_SS_STATS) + return; + for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name, ETH_GSTRING_LEN); @@ -631,8 +671,11 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port, } static int -qca8k_get_sset_count(struct dsa_switch *ds, int port) +qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) { + if (sset != ETH_SS_STATS) + return 0; + return ARRAY_SIZE(ar8327_mib); } @@ -831,6 +874,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port) static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, + .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, .phy_read = qca8k_phy_read, .phy_write = qca8k_phy_write, @@ -862,6 +906,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) return -ENOMEM; priv->bus = mdiodev->bus; + priv->dev = &mdiodev->dev; /* read the switches ID register */ id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); @@ -933,6 +978,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, qca8k_suspend, qca8k_resume); static const struct of_device_id qca8k_of_match[] = { + { .compatible = "qca,qca8334" }, { .compatible = "qca,qca8337" }, { /* sentinel */ }, }; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1cf8a920d4ff..613fe5c50236 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -51,8 +51,10 @@ #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) -#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) -#define QCA8K_PORT_STATUS_SPEED_S 0 +#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) +#define QCA8K_PORT_STATUS_SPEED_10 0 +#define QCA8K_PORT_STATUS_SPEED_100 0x1 +#define QCA8K_PORT_STATUS_SPEED_1000 0x2 #define QCA8K_PORT_STATUS_TXMAC BIT(2) #define QCA8K_PORT_STATUS_RXMAC BIT(3) #define QCA8K_PORT_STATUS_TXFLOW BIT(4) @@ -165,6 +167,7 @@ struct qca8k_priv { struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; struct dsa_switch *ds; struct mutex reg_mutex; + struct device *dev; }; struct qca8k_mib_desc { diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 176861bd2252..5bc168314ea2 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -765,8 +765,9 @@ static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev); static int 
vortex_rx(struct net_device *dev); static int boomerang_rx(struct net_device *dev); -static irqreturn_t vortex_interrupt(int irq, void *dev_id); -static irqreturn_t boomerang_interrupt(int irq, void *dev_id); +static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id); +static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev); +static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev); static int vortex_close(struct net_device *dev); static void dump_tx_ring(struct net_device *dev); static void update_stats(void __iomem *ioaddr, struct net_device *dev); @@ -838,11 +839,7 @@ MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)"); #ifdef CONFIG_NET_POLL_CONTROLLER static void poll_vortex(struct net_device *dev) { - struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; - local_irq_save(flags); - (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + vortex_boomerang_interrupt(dev->irq, dev); } #endif @@ -1728,8 +1725,7 @@ vortex_open(struct net_device *dev) dma_addr_t dma; /* Use the now-standard shared IRQ implementation. */ - if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? - boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { + if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) { pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); goto err; } @@ -1904,18 +1900,7 @@ static void vortex_tx_timeout(struct net_device *dev) pr_err("%s: Interrupt posted but not delivered --" " IRQ blocked by another device?\n", dev->name); /* Bad idea here.. but we might as well handle a few events. */ - { - /* - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; - local_irq_save(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); - } + vortex_boomerang_interrupt(dev->irq, dev); } if (vortex_debug > 0) @@ -2266,9 +2251,8 @@ out_dma_err: */ static irqreturn_t -vortex_interrupt(int irq, void *dev_id) +_vortex_interrupt(int irq, struct net_device *dev) { - struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; @@ -2277,7 +2261,6 @@ vortex_interrupt(int irq, void *dev_id) unsigned int bytes_compl = 0, pkts_compl = 0; ioaddr = vp->ioaddr; - spin_lock(&vp->lock); status = ioread16(ioaddr + EL3_STATUS); @@ -2375,7 +2358,6 @@ vortex_interrupt(int irq, void *dev_id) pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status); handler_exit: - spin_unlock(&vp->lock); return IRQ_RETVAL(handled); } @@ -2385,9 +2367,8 @@ handler_exit: */ static irqreturn_t -boomerang_interrupt(int irq, void *dev_id) +_boomerang_interrupt(int irq, struct net_device *dev) { - struct net_device *dev = dev_id; struct vortex_private *vp = netdev_priv(dev); void __iomem *ioaddr; int status; @@ -2397,12 +2378,6 @@ boomerang_interrupt(int irq, void *dev_id) ioaddr = vp->ioaddr; - - /* - * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout - * and boomerang_start_xmit - */ - spin_lock(&vp->lock); vp->handling_irq = 1; status = ioread16(ioaddr + EL3_STATUS); @@ -2521,10 +2496,29 @@ boomerang_interrupt(int irq, void *dev_id) dev->name, status); handler_exit: vp->handling_irq = 0; - spin_unlock(&vp->lock); return IRQ_RETVAL(handled); } +static irqreturn_t +vortex_boomerang_interrupt(int irq, void 
*dev_id) +{ + struct net_device *dev = dev_id; + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; + irqreturn_t ret; + + spin_lock_irqsave(&vp->lock, flags); + + if (vp->full_bus_master_rx) + ret = _boomerang_interrupt(dev->irq, dev); + else + ret = _vortex_interrupt(dev->irq, dev); + + spin_unlock_irqrestore(&vp->lock, flags); + + return ret; +} + static int vortex_rx(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 9fee7c83ef9f..f2f0264c58ba 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -29,8 +29,8 @@ config PCMCIA_AXNET called axnet_cs. If unsure, say N. config AX88796 - tristate "ASIX AX88796 NE2000 clone support" - depends on (ARM || MIPS || SUPERH) + tristate "ASIX AX88796 NE2000 clone support" if !ZORRO + depends on (ARM || MIPS || SUPERH || ZORRO || COMPILE_TEST) select CRC32 select PHYLIB select MDIO_BITBANG @@ -45,6 +45,19 @@ config AX88796_93CX6 ---help--- Select this if your platform comes with an external 93CX6 eeprom. +config XSURF100 + tristate "Amiga XSurf 100 AX88796/NE2000 clone support" + depends on ZORRO + select AX88796 + select ASIX_PHY + help + This driver is for the Individual Computers X-Surf 100 Ethernet + card (based on the Asix AX88796 chip). If you have such a card, + say Y. Otherwise, say N. + + To compile this driver as a module, choose M here: the module + will be called xsurf100. + config HYDRA tristate "Hydra support" depends on ZORRO diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile index 1d650e66cc6e..85c83c566ec6 100644 --- a/drivers/net/ethernet/8390/Makefile +++ b/drivers/net/ethernet/8390/Makefile @@ -16,4 +16,5 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o obj-$(CONFIG_STNIC) += stnic.o 8390.o obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o obj-$(CONFIG_WD80x3) += wd.o 8390.o +obj-$(CONFIG_XSURF100) += xsurf100.o obj-$(CONFIG_ZORRO8390) += zorro8390.o diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index da61cf3cb3a9..2a0ddec1dd56 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -163,6 +163,21 @@ static void ax_reset_8390(struct net_device *dev) ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ } +/* Wrapper for __ei_interrupt for platforms that have a platform-specific + * way to find out whether the interrupt request might be caused by + * the ax88796 chip. 
+ */ +static irqreturn_t ax_ei_interrupt_filtered(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ax_device *ax = to_ax_dev(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + + if (!ax->plat->check_irq(pdev)) + return IRQ_NONE; + + return ax_ei_interrupt(irq, dev_id); +} static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) @@ -387,6 +402,90 @@ static void ax_phy_switch(struct net_device *dev, int on) ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); } +static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (level) + ax->reg_memr |= AX_MEMR_MDC; + else + ax->reg_memr &= ~AX_MEMR_MDC; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (output) + ax->reg_memr &= ~AX_MEMR_MDIR; + else + ax->reg_memr |= AX_MEMR_MDIR; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (value) + ax->reg_memr |= AX_MEMR_MDO; + else + ax->reg_memr &= ~AX_MEMR_MDO; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + int reg_memr = ei_inb(ax->addr_memr); + + return reg_memr & AX_MEMR_MDI ? 1 : 0; +} + +static const struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = ax_bb_mdc, + .set_mdio_dir = ax_bb_dir, + .set_mdio_data = ax_bb_set_data, + .get_mdio_data = ax_bb_get_data, +}; + +static int ax_mii_init(struct net_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + int err; + + ax->bb_ctrl.ops = &bb_ops; + ax->addr_memr = ei_local->mem + AX_MEMR; + ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); + if (!ax->mii_bus) { + err = -ENOMEM; + goto out; + } + + ax->mii_bus->name = "ax88796_mii_bus"; + ax->mii_bus->parent = dev->dev.parent; + snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + err = mdiobus_register(ax->mii_bus); + if (err) + goto out_free_mdio_bitbang; + + return 0; + + out_free_mdio_bitbang: + free_mdio_bitbang(ax->mii_bus); + out: + return err; +} + static int ax_open(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); @@ -394,8 +493,16 @@ static int ax_open(struct net_device *dev) netdev_dbg(dev, "open\n"); - ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, - dev->name, dev); + ret = ax_mii_init(dev); + if (ret) + goto failed_mii; + + if (ax->plat->check_irq) + ret = request_irq(dev->irq, ax_ei_interrupt_filtered, + ax->irqflags, dev->name, dev); + else + ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, + dev->name, dev); if (ret) goto failed_request_irq; @@ -421,6 +528,10 @@ static int ax_open(struct net_device *dev) ax_phy_switch(dev, 0); free_irq(dev->irq, dev); failed_request_irq: + /* unregister mdiobus */ + mdiobus_unregister(ax->mii_bus); + free_mdio_bitbang(ax->mii_bus); + failed_mii: return ret; } @@ -440,6 +551,9 @@ static int ax_close(struct net_device *dev) phy_disconnect(dev->phydev); free_irq(dev->irq, dev); + + mdiobus_unregister(ax->mii_bus); + free_mdio_bitbang(ax->mii_bus); return 0; } @@ -539,92 +653,8 @@ static 
const struct net_device_ops ax_netdev_ops = { #endif }; -static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (level) - ax->reg_memr |= AX_MEMR_MDC; - else - ax->reg_memr &= ~AX_MEMR_MDC; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (output) - ax->reg_memr &= ~AX_MEMR_MDIR; - else - ax->reg_memr |= AX_MEMR_MDIR; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - - if (value) - ax->reg_memr |= AX_MEMR_MDO; - else - ax->reg_memr &= ~AX_MEMR_MDO; - - ei_outb(ax->reg_memr, ax->addr_memr); -} - -static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) -{ - struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); - int reg_memr = ei_inb(ax->addr_memr); - - return reg_memr & AX_MEMR_MDI ? 1 : 0; -} - -static const struct mdiobb_ops bb_ops = { - .owner = THIS_MODULE, - .set_mdc = ax_bb_mdc, - .set_mdio_dir = ax_bb_dir, - .set_mdio_data = ax_bb_set_data, - .get_mdio_data = ax_bb_get_data, -}; - /* setup code */ -static int ax_mii_init(struct net_device *dev) -{ - struct platform_device *pdev = to_platform_device(dev->dev.parent); - struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - int err; - - ax->bb_ctrl.ops = &bb_ops; - ax->addr_memr = ei_local->mem + AX_MEMR; - ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); - if (!ax->mii_bus) { - err = -ENOMEM; - goto out; - } - - ax->mii_bus->name = "ax88796_mii_bus"; - ax->mii_bus->parent = dev->dev.parent; - snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); - - err = mdiobus_register(ax->mii_bus); - if (err) - goto out_free_mdio_bitbang; - - return 0; - - out_free_mdio_bitbang: - free_mdio_bitbang(ax->mii_bus); - out: - return err; -} - static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) { void __iomem *ioaddr = ei_local->mem; @@ -669,10 +699,16 @@ static int ax_init_dev(struct net_device *dev) if (ax->plat->flags & AXFLG_HAS_EEPROM) { unsigned char SA_prom[32]; + ei_outb(6, ioaddr + EN0_RCNTLO); + ei_outb(0, ioaddr + EN0_RCNTHI); + ei_outb(0, ioaddr + EN0_RSARLO); + ei_outb(0, ioaddr + EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, ioaddr + NE_CMD); for (i = 0; i < sizeof(SA_prom); i += 2) { SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); } + ei_outb(ENISR_RDC, ioaddr + EN0_ISR); /* Ack intr. 
*/ if (ax->plat->wordlength == 2) for (i = 0; i < 16; i++) @@ -741,18 +777,20 @@ static int ax_init_dev(struct net_device *dev) #endif ei_local->reset_8390 = &ax_reset_8390; - ei_local->block_input = &ax_block_input; - ei_local->block_output = &ax_block_output; + if (ax->plat->block_input) + ei_local->block_input = ax->plat->block_input; + else + ei_local->block_input = &ax_block_input; + if (ax->plat->block_output) + ei_local->block_output = ax->plat->block_output; + else + ei_local->block_output = &ax_block_output; ei_local->get_8390_hdr = &ax_get_8390_hdr; ei_local->priv = 0; dev->netdev_ops = &ax_netdev_ops; dev->ethtool_ops = &ax_ethtool_ops; - ret = ax_mii_init(dev); - if (ret) - goto err_out; - ax_NS8390_init(dev, 0); ret = register_netdev(dev); @@ -777,7 +815,6 @@ static int ax_remove(struct platform_device *pdev) struct resource *mem; unregister_netdev(dev); - free_irq(dev->irq, dev); iounmap(ei_local->mem); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -789,6 +826,7 @@ static int ax_remove(struct platform_device *pdev) release_mem_region(mem->start, resource_size(mem)); } + platform_set_drvdata(pdev, NULL); free_netdev(dev); return 0; @@ -835,6 +873,9 @@ static int ax_probe(struct platform_device *pdev) dev->irq = irq->start; ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; + if (irq->flags & IORESOURCE_IRQ_SHAREABLE) + ax->irqflags |= IRQF_SHARED; + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no MEM specified\n"); @@ -919,6 +960,7 @@ static int ax_probe(struct platform_device *pdev) release_mem_region(mem->start, mem_size); exit_mem: + platform_set_drvdata(pdev, NULL); free_netdev(dev); return ret; diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c new file mode 100644 index 000000000000..e2c963821ffe --- /dev/null +++ b/drivers/net/ethernet/8390/xsurf100.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/zorro.h> +#include <net/ax88796.h> +#include <asm/amigaints.h> + +#define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100 \ + ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x64, 0) + +#define XS100_IRQSTATUS_BASE 0x40 +#define XS100_8390_BASE 0x800 + +/* Longword-access area. 
Translated to 2 16-bit access cycles by the + * X-Surf 100 FPGA + */ +#define XS100_8390_DATA32_BASE 0x8000 +#define XS100_8390_DATA32_SIZE 0x2000 +/* Sub-Areas for fast data register access; addresses relative to area begin */ +#define XS100_8390_DATA_READ32_BASE 0x0880 +#define XS100_8390_DATA_WRITE32_BASE 0x0C80 +#define XS100_8390_DATA_AREA_SIZE 0x80 + +#define __NS8390_init ax_NS8390_init + +/* force unsigned long back to 'void __iomem *' */ +#define ax_convert_addr(_a) ((void __force __iomem *)(_a)) + +#define ei_inb(_a) z_readb(ax_convert_addr(_a)) +#define ei_outb(_v, _a) z_writeb(_v, ax_convert_addr(_a)) + +#define ei_inw(_a) z_readw(ax_convert_addr(_a)) +#define ei_outw(_v, _a) z_writew(_v, ax_convert_addr(_a)) + +#define ei_inb_p(_a) ei_inb(_a) +#define ei_outb_p(_v, _a) ei_outb(_v, _a) + +/* define EI_SHIFT() to take into account our register offsets */ +#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) + +/* Ensure we have our RCR base value */ +#define AX88796_PLATFORM + +static unsigned char version[] = + "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; + +#include "lib8390.c" + +/* from ne.c */ +#define NE_CMD EI_SHIFT(0x00) +#define NE_RESET EI_SHIFT(0x1f) +#define NE_DATAPORT EI_SHIFT(0x10) + +struct xsurf100_ax_plat_data { + struct ax_plat_data ax; + void __iomem *base_regs; + void __iomem *data_area; +}; + +static int is_xsurf100_network_irq(struct platform_device *pdev) +{ + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + return (readw(xs100->base_regs + XS100_IRQSTATUS_BASE) & 0xaaaa) != 0; +} + +/* These functions guarantee that the iomem is accessed with 32 bit + * cycles only. z_memcpy_fromio / z_memcpy_toio don't + */ +static void z_memcpy_fromio32(void *dst, const void __iomem *src, size_t bytes) +{ + while (bytes > 32) { + asm __volatile__ + ("movem.l (%0)+,%%d0-%%d7\n" + "movem.l %%d0-%%d7,(%1)\n" + "adda.l #32,%1" : "=a"(src), "=a"(dst) + : "0"(src), "1"(dst) : "d0", "d1", "d2", "d3", "d4", + "d5", "d6", "d7", "memory"); + bytes -= 32; + } + while (bytes) { + *(uint32_t *)dst = z_readl(src); + src += 4; + dst += 4; + bytes -= 4; + } +} + +static void z_memcpy_toio32(void __iomem *dst, const void *src, size_t bytes) +{ + while (bytes) { + z_writel(*(const uint32_t *)src, dst); + src += 4; + dst += 4; + bytes -= 4; + } +} + +static void xs100_write(struct net_device *dev, const void *src, + unsigned int count) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + /* copy whole blocks */ + while (count > XS100_8390_DATA_AREA_SIZE) { + z_memcpy_toio32(xs100->data_area + + XS100_8390_DATA_WRITE32_BASE, src, + XS100_8390_DATA_AREA_SIZE); + src += XS100_8390_DATA_AREA_SIZE; + count -= XS100_8390_DATA_AREA_SIZE; + } + /* copy whole dwords */ + z_memcpy_toio32(xs100->data_area + XS100_8390_DATA_WRITE32_BASE, + src, count & ~3); + src += count & ~3; + if (count & 2) { + ei_outw(*(uint16_t *)src, ei_local->mem + NE_DATAPORT); + src += 2; + } + if (count & 1) + ei_outb(*(uint8_t *)src, ei_local->mem + NE_DATAPORT); +} + +static void xs100_read(struct net_device *dev, void *dst, unsigned int count) +{ + struct ei_device *ei_local = netdev_priv(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + /* copy whole blocks */ + while (count > XS100_8390_DATA_AREA_SIZE) { + z_memcpy_fromio32(dst, xs100->data_area + + 
XS100_8390_DATA_READ32_BASE, + XS100_8390_DATA_AREA_SIZE); + dst += XS100_8390_DATA_AREA_SIZE; + count -= XS100_8390_DATA_AREA_SIZE; + } + /* copy whole dwords */ + z_memcpy_fromio32(dst, xs100->data_area + XS100_8390_DATA_READ32_BASE, + count & ~3); + dst += count & ~3; + if (count & 2) { + *(uint16_t *)dst = ei_inw(ei_local->mem + NE_DATAPORT); + dst += 2; + } + if (count & 1) + *(uint8_t *)dst = ei_inb(ei_local->mem + NE_DATAPORT); +} + +/* Block input and output, similar to the Crynwr packet driver. If + * you are porting to a new ethercard, look at the packet driver + * source for hints. The NEx000 doesn't share the on-board packet + * memory -- you have to put the packet out through the "remote DMA" + * dataport using ei_outb. + */ +static void xs100_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + char *buf = skb->data; + + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); + ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); + ei_outb(E8390_RREAD + E8390_START, nic_base + NE_CMD); + + xs100_read(dev, buf, count); + + ei_local->dmaing &= ~1; +} + +static void xs100_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = netdev_priv(dev); + void __iomem *nic_base = ei_local->mem; + unsigned long dma_start; + + /* Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? I + * should check someday. + */ + if (ei_local->word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing + * you'll see + */ + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s [DMAstat:%d][irqlock:%d]\n", + __func__, + ei_local->dmaing, ei_local->irqlock); + return; + } + + ei_local->dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, nic_base + NE_CMD); + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); + + /* Now the normal output. */ + ei_outb(count & 0xff, nic_base + EN0_RCNTLO); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(0x00, nic_base + EN0_RSARLO); + ei_outb(start_page, nic_base + EN0_RSARHI); + + ei_outb(E8390_RWRITE + E8390_START, nic_base + NE_CMD); + + xs100_write(dev, buf, count); + + dma_start = jiffies; + + while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { + if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); + ei_local->reset_8390(dev); + ax_NS8390_init(dev, 1); + break; + } + } + + ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. 
*/ + ei_local->dmaing &= ~0x01; +} + +static int xsurf100_probe(struct zorro_dev *zdev, + const struct zorro_device_id *ent) +{ + struct platform_device *pdev; + struct xsurf100_ax_plat_data ax88796_data; + struct resource res[2] = { + DEFINE_RES_NAMED(IRQ_AMIGA_PORTS, 1, NULL, + IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE), + DEFINE_RES_MEM(zdev->resource.start + XS100_8390_BASE, + 4 * 0x20) + }; + int reg; + /* This table is referenced in the device structure, so it must + * outlive the scope of xsurf100_probe. + */ + static u32 reg_offsets[32]; + int ret = 0; + + /* X-Surf 100 control and 32 bit ring buffer data access areas. + * These resources are not used by the ax88796 driver, so must + * be requested here and passed via platform data. + */ + + if (!request_mem_region(zdev->resource.start, 0x100, zdev->name)) { + dev_err(&zdev->dev, "cannot reserve X-Surf 100 control registers\n"); + return -ENXIO; + } + + if (!request_mem_region(zdev->resource.start + + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE, + "X-Surf 100 32-bit data access")) { + dev_err(&zdev->dev, "cannot reserve 32-bit area\n"); + ret = -ENXIO; + goto exit_req; + } + + for (reg = 0; reg < 0x20; reg++) + reg_offsets[reg] = 4 * reg; + + memset(&ax88796_data, 0, sizeof(ax88796_data)); + ax88796_data.ax.flags = AXFLG_HAS_EEPROM; + ax88796_data.ax.wordlength = 2; + ax88796_data.ax.dcr_val = 0x48; + ax88796_data.ax.rcr_val = 0x40; + ax88796_data.ax.reg_offsets = reg_offsets; + ax88796_data.ax.check_irq = is_xsurf100_network_irq; + ax88796_data.base_regs = ioremap(zdev->resource.start, 0x100); + + /* error handling for ioremap regs */ + if (!ax88796_data.base_regs) { + dev_err(&zdev->dev, "Cannot ioremap area %pR (registers)\n", + &zdev->resource); + + ret = -ENXIO; + goto exit_req2; + } + + ax88796_data.data_area = ioremap(zdev->resource.start + + XS100_8390_DATA32_BASE, XS100_8390_DATA32_SIZE); + + /* error handling for ioremap data */ + if (!ax88796_data.data_area) { + dev_err(&zdev->dev, + "Cannot ioremap area %pR offset %x (32-bit access)\n", + &zdev->resource, XS100_8390_DATA32_BASE); + + ret = -ENXIO; + goto exit_mem; + } + + ax88796_data.ax.block_output = xs100_block_output; + ax88796_data.ax.block_input = xs100_block_input; + + pdev = platform_device_register_resndata(&zdev->dev, "ax88796", + zdev->slotaddr, res, 2, + &ax88796_data, + sizeof(ax88796_data)); + + if (IS_ERR(pdev)) { + dev_err(&zdev->dev, "cannot register platform device\n"); + ret = -ENXIO; + goto exit_mem2; + } + + zorro_set_drvdata(zdev, pdev); + + if (!ret) + return 0; + + exit_mem2: + iounmap(ax88796_data.data_area); + + exit_mem: + iounmap(ax88796_data.base_regs); + + exit_req2: + release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE); + + exit_req: + release_mem_region(zdev->resource.start, 0x100); + + return ret; +} + +static void xsurf100_remove(struct zorro_dev *zdev) +{ + struct platform_device *pdev = zorro_get_drvdata(zdev); + struct xsurf100_ax_plat_data *xs100 = dev_get_platdata(&pdev->dev); + + platform_device_unregister(pdev); + + iounmap(xs100->base_regs); + release_mem_region(zdev->resource.start, 0x100); + iounmap(xs100->data_area); + release_mem_region(zdev->resource.start + XS100_8390_DATA32_BASE, + XS100_8390_DATA32_SIZE); +} + +static const struct zorro_device_id xsurf100_zorro_tbl[] = { + { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF100, }, + { 0 } +}; + +MODULE_DEVICE_TABLE(zorro, xsurf100_zorro_tbl); + +static struct zorro_driver xsurf100_driver = { + .name = "xsurf100", + .id_table = 
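xsurf100_probe() above unwinds with the usual kernel goto chain: each failure releases, in reverse order, exactly the resources acquired so far (two memory regions, two ioremap()ped windows). A self-contained model of that control flow, with made-up resource names:

#include <stdio.h>

static int grab(const char *what, int ok)
{
	printf("%s %s\n", ok ? "acquired" : "failed", what);
	return ok;
}

static void drop(const char *what) { printf("released %s\n", what); }

static int probe(int fail_at)
{
	if (!grab("ctrl region", fail_at != 1))
		return -1;
	if (!grab("data region", fail_at != 2))
		goto err_ctrl;
	if (!grab("ctrl mapping", fail_at != 3))
		goto err_data;
	if (!grab("data mapping", fail_at != 4))
		goto err_map;
	return 0;                       /* success: everything stays held */

err_map:
	drop("ctrl mapping");           /* undo in reverse acquisition order */
err_data:
	drop("data region");
err_ctrl:
	drop("ctrl region");
	return -1;
}

int main(void)
{
	probe(3);  /* fail at step 3: steps 1-2 released, newest first */
	return 0;
}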
xsurf100_zorro_tbl, + .probe = xsurf100_probe, + .remove = xsurf100_remove, +}; + +module_driver(xsurf100_driver, zorro_register_driver, zorro_unregister_driver); + +MODULE_DESCRIPTION("X-Surf 100 driver"); +MODULE_AUTHOR("Michael Karcher <kernel@mkarcher.dialup.fu-berlin.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 603a5704dab8..af766fd61151 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -33,9 +33,9 @@ source "drivers/net/ethernet/aquantia/Kconfig" source "drivers/net/ethernet/arc/Kconfig" source "drivers/net/ethernet/atheros/Kconfig" source "drivers/net/ethernet/aurora/Kconfig" -source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/broadcom/Kconfig" source "drivers/net/ethernet/brocade/Kconfig" +source "drivers/net/ethernet/cadence/Kconfig" source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/cavium/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" @@ -72,16 +72,16 @@ source "drivers/net/ethernet/dec/Kconfig" source "drivers/net/ethernet/dlink/Kconfig" source "drivers/net/ethernet/emulex/Kconfig" source "drivers/net/ethernet/ezchip/Kconfig" -source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/faraday/Kconfig" source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/hp/Kconfig" source "drivers/net/ethernet/huawei/Kconfig" +source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" -source "drivers/net/ethernet/i825xx/Kconfig" +source "drivers/net/ethernet/neterion/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" config JME @@ -115,6 +115,7 @@ source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" +source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX @@ -160,20 +161,21 @@ source "drivers/net/ethernet/packetengines/Kconfig" source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" +source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" -source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/rocker/Kconfig" source "drivers/net/ethernet/samsung/Kconfig" source "drivers/net/ethernet/seeq/Kconfig" -source "drivers/net/ethernet/silan/Kconfig" -source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" +source "drivers/net/ethernet/silan/Kconfig" +source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" +source "drivers/net/ethernet/synopsys/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" source "drivers/net/ethernet/ti/Kconfig" source "drivers/net/ethernet/toshiba/Kconfig" @@ -182,6 +184,5 @@ source "drivers/net/ethernet/via/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" -source "drivers/net/ethernet/synopsys/Kconfig" endif # ETHERNET diff --git 
a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 2bfd2eea50bf..8fbfe9ce2fa5 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index c99e3e845ac0..a90080f12e67 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1074,16 +1074,12 @@ static int amd8111e_calc_coalesce(struct net_device *dev) amd8111e_set_coalesce(dev,TX_INTR_COAL); coal_conf->tx_coal_type = MEDIUM_COALESCE; } - - } - else if(tx_pkt_size >= 1024){ - if (tx_pkt_size >= 1024){ - if(coal_conf->tx_coal_type != HIGH_COALESCE){ - coal_conf->tx_timeout = 4; - coal_conf->tx_event_count = 8; - amd8111e_set_coalesce(dev,TX_INTR_COAL); - coal_conf->tx_coal_type = HIGH_COALESCE; - } + } else if (tx_pkt_size >= 1024) { + if (coal_conf->tx_coal_type != HIGH_COALESCE) { + coal_conf->tx_timeout = 4; + coal_conf->tx_event_count = 8; + amd8111e_set_coalesce(dev, TX_INTR_COAL); + coal_conf->tx_coal_type = HIGH_COALESCE; } } } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7c204f05b418..24f1053b8785 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1312,14 +1312,83 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) return 0; } +static void xgbe_free_memory(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + + /* Free the ring descriptors and buffers */ + desc_if->free_ring_resources(pdata); + + /* Free the channel and ring structures */ + xgbe_free_channels(pdata); +} + +static int xgbe_alloc_memory(struct xgbe_prv_data *pdata) +{ + struct xgbe_desc_if *desc_if = &pdata->desc_if; + struct net_device *netdev = pdata->netdev; + int ret; + + if (pdata->new_tx_ring_count) { + pdata->tx_ring_count = pdata->new_tx_ring_count; + pdata->tx_q_count = pdata->tx_ring_count; + pdata->new_tx_ring_count = 0; + } + + if (pdata->new_rx_ring_count) { + pdata->rx_ring_count = pdata->new_rx_ring_count; + pdata->new_rx_ring_count = 0; + } + + /* Calculate the Rx buffer size before allocating rings */ + pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); + + /* Allocate the channel and ring structures */ + ret = xgbe_alloc_channels(pdata); + if (ret) + return ret; + + /* Allocate the ring descriptors and buffers */ + ret = desc_if->alloc_ring_resources(pdata); + if (ret) + goto err_channels; + + /* Initialize the service and Tx timers */ + xgbe_init_timers(pdata); + + return 0; + +err_channels: + xgbe_free_memory(pdata); + + return ret; +} + static int xgbe_start(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_phy_if *phy_if = &pdata->phy_if; struct net_device *netdev = pdata->netdev; + unsigned int i; int ret; - DBGPR("-->xgbe_start\n"); + /* Set the number of queues */ + ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); + if (ret) { + netdev_err(netdev, "error setting real tx queue count\n"); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); + if (ret) { + 
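xgbe_alloc_memory() is where an ethtool-requested ring-count change actually lands: the request parks in new_{tx,rx}_ring_count and is consumed, then cleared, on the next full allocation. The deferred-apply pattern in isolation:

#include <stdio.h>

struct cfg {
	unsigned int ring_count;
	unsigned int new_ring_count;  /* 0 means "no change requested" */
};

static void apply_pending(struct cfg *c)
{
	if (c->new_ring_count) {
		c->ring_count = c->new_ring_count;
		c->new_ring_count = 0;   /* consume the request */
	}
	/* ... (re)allocate channels and rings for c->ring_count ... */
}

int main(void)
{
	struct cfg c = { .ring_count = 4 };

	c.new_ring_count = 8;   /* ethtool request, applied on restart */
	apply_pending(&c);
	printf("rings = %u\n", c.ring_count);  /* 8 */
	apply_pending(&c);                     /* no-op: nothing pending */
	printf("rings = %u\n", c.ring_count);  /* still 8 */
	return 0;
}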
netdev_err(netdev, "error setting real rx queue count\n"); + return ret; + } + + /* Set RSS lookup table data for programming */ + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, + i % pdata->rx_ring_count); ret = hw_if->init(pdata); if (ret) @@ -1347,8 +1416,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata) clear_bit(XGBE_STOPPED, &pdata->dev_state); - DBGPR("<--xgbe_start\n"); - return 0; err_irqs: @@ -1426,10 +1493,22 @@ static void xgbe_stopdev(struct work_struct *work) netdev_alert(pdata->netdev, "device stopped\n"); } -static void xgbe_restart_dev(struct xgbe_prv_data *pdata) +void xgbe_full_restart_dev(struct xgbe_prv_data *pdata) { - DBGPR("-->xgbe_restart_dev\n"); + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + xgbe_stop(pdata); + + xgbe_free_memory(pdata); + xgbe_alloc_memory(pdata); + + xgbe_start(pdata); +} +void xgbe_restart_dev(struct xgbe_prv_data *pdata) +{ /* If not running, "restart" will happen on open */ if (!netif_running(pdata->netdev)) return; @@ -1440,8 +1519,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) xgbe_free_rx_data(pdata); xgbe_start(pdata); - - DBGPR("<--xgbe_restart_dev\n"); } static void xgbe_restart(struct work_struct *work) @@ -1827,11 +1904,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, static int xgbe_open(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; int ret; - DBGPR("-->xgbe_open\n"); - /* Create the various names based on netdev name */ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", netdev_name(netdev)); @@ -1876,43 +1950,25 @@ static int xgbe_open(struct net_device *netdev) goto err_sysclk; } - /* Calculate the Rx buffer size before allocating rings */ - ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); - if (ret < 0) - goto err_ptpclk; - pdata->rx_buf_size = ret; - - /* Allocate the channel and ring structures */ - ret = xgbe_alloc_channels(pdata); - if (ret) - goto err_ptpclk; - - /* Allocate the ring descriptors and buffers */ - ret = desc_if->alloc_ring_resources(pdata); - if (ret) - goto err_channels; - INIT_WORK(&pdata->service_work, xgbe_service); INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); - xgbe_init_timers(pdata); + + ret = xgbe_alloc_memory(pdata); + if (ret) + goto err_ptpclk; ret = xgbe_start(pdata); if (ret) - goto err_rings; + goto err_mem; clear_bit(XGBE_DOWN, &pdata->dev_state); - DBGPR("<--xgbe_open\n"); - return 0; -err_rings: - desc_if->free_ring_resources(pdata); - -err_channels: - xgbe_free_channels(pdata); +err_mem: + xgbe_free_memory(pdata); err_ptpclk: clk_disable_unprepare(pdata->ptpclk); @@ -1932,18 +1988,11 @@ err_dev_wq: static int xgbe_close(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_desc_if *desc_if = &pdata->desc_if; - - DBGPR("-->xgbe_close\n"); /* Stop the device */ xgbe_stop(pdata); - /* Free the ring descriptors and buffers */ - desc_if->free_ring_resources(pdata); - - /* Free the channel and ring structures */ - xgbe_free_channels(pdata); + xgbe_free_memory(pdata); /* Disable the clocks */ clk_disable_unprepare(pdata->ptpclk); @@ -1957,8 +2006,6 @@ static int xgbe_close(struct net_device *netdev) set_bit(XGBE_DOWN, &pdata->dev_state); - DBGPR("<--xgbe_close\n"); - return 0; } diff --git 
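Note that the real-queue counts and the RSS lookup table are now programmed in xgbe_start(), so both track channel-count changes across restarts. The table itself just round-robins Rx DMA channels; for example, assuming a 256-entry table (the presumed XGBE_RSS_MAX_TABLE_SIZE) and six rings:

#include <stdio.h>

#define RSS_TABLE_SIZE 256  /* assumed stand-in for XGBE_RSS_MAX_TABLE_SIZE */

int main(void)
{
	unsigned int table[RSS_TABLE_SIZE], hits[8] = { 0 };
	unsigned int rx_ring_count = 6, i;  /* must be <= entries in hits[] */

	/* i % rx_ring_count spreads hash buckets evenly across channels */
	for (i = 0; i < RSS_TABLE_SIZE; i++) {
		table[i] = i % rx_ring_count;
		hits[table[i]]++;
	}
	for (i = 0; i < rx_ring_count; i++)
		printf("channel %u: %u entries\n", i, hits[i]);
	return 0;
}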
a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index ff397bb25042..a880f10e3e70 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -626,6 +626,217 @@ static int xgbe_get_ts_info(struct net_device *netdev, return 0; } +static int xgbe_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return pdata->phy_if.module_info(pdata, modinfo); +} + +static int xgbe_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + return pdata->phy_if.module_eeprom(pdata, eeprom, data); +} + +static void xgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + + ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX; + ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX; + ringparam->rx_pending = pdata->rx_desc_count; + ringparam->tx_pending = pdata->tx_desc_count; +} + +static int xgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, tx; + + if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) { + netdev_err(netdev, "unsupported ring parameter\n"); + return -EINVAL; + } + + if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) || + (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) { + netdev_err(netdev, + "rx ring parameter must be between %u and %u\n", + XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX); + return -EINVAL; + } + + if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) || + (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) { + netdev_err(netdev, + "tx ring parameter must be between %u and %u\n", + XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX); + return -EINVAL; + } + + rx = __rounddown_pow_of_two(ringparam->rx_pending); + if (rx != ringparam->rx_pending) + netdev_notice(netdev, + "rx ring parameter rounded to power of two: %u\n", + rx); + + tx = __rounddown_pow_of_two(ringparam->tx_pending); + if (tx != ringparam->tx_pending) + netdev_notice(netdev, + "tx ring parameter rounded to power of two: %u\n", + tx); + + if ((rx == pdata->rx_desc_count) && + (tx == pdata->tx_desc_count)) + goto out; + + pdata->rx_desc_count = rx; + pdata->tx_desc_count = tx; + + xgbe_restart_dev(pdata); + +out: + return 0; +} + +static void xgbe_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, tx, combined; + + /* Calculate maximums allowed: + * - Take into account the number of available IRQs + * - Do not take into account the number of online CPUs so that + * the user can over-subscribe if desired + * - Tx is additionally limited by the number of hardware queues + */ + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); + rx = min(rx, pdata->channel_irq_count); + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); + tx = min(tx, pdata->channel_irq_count); + tx = min(tx, pdata->tx_max_q_count); + + combined = min(rx, tx); + + channels->max_combined = combined; + channels->max_rx = rx ? rx - 1 : 0; + channels->max_tx = tx ? tx - 1 : 0; + + /* Get current settings based on device state */ + rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count; + tx = pdata->new_tx_ring_count ? 
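xgbe_set_ringparam() range-checks the request and then rounds it down to a power of two, warning when the rounding changed the value (presumably so ring-index arithmetic can stay mask-based). __rounddown_pow_of_two() is kernel-internal; a standalone equivalent of the validation:

#include <stdio.h>

static unsigned int rounddown_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p * 2 <= v)  /* largest power of two <= v; v is small here */
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int req = 1000, min = 64, max = 4096, rounded;

	if (req < min || req > max) {
		fprintf(stderr, "ring size must be between %u and %u\n",
			min, max);
		return 1;
	}
	rounded = rounddown_pow2(req);
	if (rounded != req)
		printf("ring size rounded to power of two: %u\n", rounded);
	return 0;   /* prints 512 for a request of 1000 */
}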
: pdata->tx_ring_count; + + combined = min(rx, tx); + rx -= combined; + tx -= combined; + + channels->combined_count = combined; + channels->rx_count = rx; + channels->tx_count = tx; +} + +static void xgbe_print_set_channels_input(struct net_device *netdev, + struct ethtool_channels *channels) +{ + netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n", + channels->combined_count, channels->rx_count, + channels->tx_count); +} + +static int xgbe_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + unsigned int rx, rx_curr, tx, tx_curr, combined; + + /* Calculate maximums allowed: + * - Take into account the number of available IRQs + * - Do not take into account the number of online CPUs so that + * the user can over-subscribe if desired + * - Tx is additionally limited by the number of hardware queues + */ + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); + rx = min(rx, pdata->channel_irq_count); + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); + tx = min(tx, pdata->tx_max_q_count); + tx = min(tx, pdata->channel_irq_count); + + combined = min(rx, tx); + + /* Should not be setting other count */ + if (channels->other_count) { + netdev_err(netdev, + "other channel count must be zero\n"); + return -EINVAL; + } + + /* Require at least one Combined (Rx and Tx) channel */ + if (!channels->combined_count) { + netdev_err(netdev, + "at least one combined Rx/Tx channel is required\n"); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Check combined channels */ + if (channels->combined_count > combined) { + netdev_err(netdev, + "combined channel count cannot exceed %u\n", + combined); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Can have some Rx-only or Tx-only channels, but not both */ + if (channels->rx_count && channels->tx_count) { + netdev_err(netdev, + "cannot specify both Rx-only and Tx-only channels\n"); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + /* Check that we don't exceed the maximum number of channels */ + if ((channels->combined_count + channels->rx_count) > rx) { + netdev_err(netdev, + "total Rx channels (%u) requested exceeds maximum available (%u)\n", + channels->combined_count + channels->rx_count, rx); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + if ((channels->combined_count + channels->tx_count) > tx) { + netdev_err(netdev, + "total Tx channels (%u) requested exceeds maximum available (%u)\n", + channels->combined_count + channels->tx_count, tx); + xgbe_print_set_channels_input(netdev, channels); + return -EINVAL; + } + + rx = channels->combined_count + channels->rx_count; + tx = channels->combined_count + channels->tx_count; + + rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count; + tx_curr = pdata->new_tx_ring_count ? 
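xgbe_get_channels()/xgbe_set_channels() model the configuration as "combined" Rx+Tx pairs plus extra channels on whichever side has more: combined = min(rx, tx), and the surplus is reported as Rx-only or Tx-only, never both. Decomposing a current setting that way:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int rx = 6, tx = 4;            /* active Rx/Tx ring counts */
	unsigned int combined = min_u(rx, tx);  /* fully paired channels */

	rx -= combined;                         /* leftover Rx-only */
	tx -= combined;                         /* leftover Tx-only */
	printf("combined=%u rx-only=%u tx-only=%u\n", combined, rx, tx);
	/* prints: combined=4 rx-only=2 tx-only=0 */
	return 0;
}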
: pdata->tx_ring_count; + + if ((rx == rx_curr) && (tx == tx_curr)) + goto out; + + pdata->new_rx_ring_count = rx; + pdata->new_tx_ring_count = tx; + + xgbe_full_restart_dev(pdata); + +out: + return 0; +} + static const struct ethtool_ops xgbe_ethtool_ops = { .get_drvinfo = xgbe_get_drvinfo, .get_msglevel = xgbe_get_msglevel, @@ -646,6 +857,12 @@ static const struct ethtool_ops xgbe_ethtool_ops = { .get_ts_info = xgbe_get_ts_info, .get_link_ksettings = xgbe_get_link_ksettings, .set_link_ksettings = xgbe_set_link_ksettings, + .get_module_info = xgbe_get_module_info, + .get_module_eeprom = xgbe_get_module_eeprom, + .get_ringparam = xgbe_get_ringparam, + .set_ringparam = xgbe_set_ringparam, + .get_channels = xgbe_get_channels, + .set_channels = xgbe_set_channels, }; const struct ethtool_ops *xgbe_get_ethtool_ops(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 441d0973957b..b41f23679a08 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) { struct net_device *netdev = pdata->netdev; struct device *dev = pdata->dev; - unsigned int i; int ret; netdev->irq = pdata->dev_irq; @@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) pdata->tx_ring_count, pdata->rx_ring_count); } - /* Set the number of queues */ - ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); - if (ret) { - dev_err(dev, "error setting real tx queue count\n"); - return ret; - } - - ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); - if (ret) { - dev_err(dev, "error setting real rx queue count\n"); - return ret; - } - - /* Initialize RSS hash key and lookup table */ + /* Initialize RSS hash key */ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); - for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) - XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, - i % pdata->rx_ring_count); - XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 1b45cd73a258..4b5d625de8f0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -126,6 +126,24 @@ #include "xgbe.h" #include "xgbe-common.h" +static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data) +{ + if (!pdata->phy_if.phy_impl.module_eeprom) + return -ENXIO; + + return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data); +} + +static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo) +{ + if (!pdata->phy_if.phy_impl.module_info) + return -ENXIO; + + return pdata->phy_if.phy_impl.module_info(pdata, modinfo); +} + static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) { int reg; @@ -198,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata) xgbe_an37_clear_interrupts(pdata); } -static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata) -{ - unsigned int reg; - - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - - reg |= XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); -} - -static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata) -{ - unsigned int reg; - - reg = 
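The xgbe-mdio.c hunk below shows the plumbing for the new ethtool module hooks: the generic PHY layer forwards only when the hardware-specific implementation registered the operation, and returns -ENXIO otherwise so ports without SFP support fail cleanly. The shape of that optional-op delegation, as a sketch:

#include <stdio.h>
#include <errno.h>

struct phy_impl {
	int (*module_info)(void);  /* optional op; may be NULL */
};

static int get_module_info(const struct phy_impl *impl)
{
	if (!impl->module_info)
		return -ENXIO;         /* op not provided by this PHY */
	return impl->module_info();
}

static int v2_module_info(void) { return 0; }

int main(void)
{
	struct phy_impl none = { 0 }, v2 = { .module_info = v2_module_info };

	printf("%d %d\n", get_module_info(&none), get_module_info(&v2));
	return 0;
}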
XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - - reg &= ~XGBE_KR_TRAINING_ENABLE; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); -} - static void xgbe_kr_mode(struct xgbe_prv_data *pdata) { - /* Enable KR training */ - xgbe_an73_enable_kr_training(pdata); - /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); @@ -232,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata) static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 2.5G speed */ pdata->hw_if.set_speed(pdata, SPEED_2500); @@ -244,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -260,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) if (pdata->kr_redrv) return xgbe_kr_mode(pdata); - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); @@ -272,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) static void xgbe_x_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -284,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata) static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -296,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) { - /* Disable KR training */ - xgbe_an73_disable_kr_training(pdata); - /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); @@ -354,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata) xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); } -static void xgbe_set_mode(struct xgbe_prv_data *pdata, +static bool xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { if (mode == xgbe_cur_mode(pdata)) - return; + return false; xgbe_change_mode(pdata, mode); + + return true; } static bool xgbe_use_mode(struct xgbe_prv_data *pdata, @@ -407,6 +386,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable, { unsigned int reg; + /* Disable KR training for now */ + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg &= ~XGBE_KR_TRAINING_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + + /* Update AN settings */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); reg &= ~MDIO_AN_CTRL1_ENABLE; @@ -504,21 +489,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); /* Start KR training */ - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); - if (reg & XGBE_KR_TRAINING_ENABLE) { - if (pdata->phy_if.phy_impl.kr_training_pre) - pdata->phy_if.phy_impl.kr_training_pre(pdata); + if (pdata->phy_if.phy_impl.kr_training_pre) + pdata->phy_if.phy_impl.kr_training_pre(pdata); - reg |= XGBE_KR_TRAINING_START; - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, - reg); + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); + reg 
|= XGBE_KR_TRAINING_ENABLE; + reg |= XGBE_KR_TRAINING_START; + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - netif_dbg(pdata, link, pdata->netdev, - "KR training initiated\n"); + netif_dbg(pdata, link, pdata->netdev, + "KR training initiated\n"); - if (pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - } + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); return XGBE_AN_PAGE_RECEIVED; } @@ -1197,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) return 0; } -static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) { int ret; + mutex_lock(&pdata->an_mutex); + set_bit(XGBE_LINK_INIT, &pdata->dev_state); pdata->link_check = jiffies; ret = pdata->phy_if.phy_impl.an_config(pdata); if (ret) - return ret; + goto out; if (pdata->phy.autoneg != AUTONEG_ENABLE) { ret = xgbe_phy_config_fixed(pdata); if (ret || !pdata->kr_redrv) - return ret; + goto out; netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n"); } else { @@ -1221,24 +1206,27 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) /* Disable auto-negotiation interrupt */ disable_irq(pdata->an_irq); - /* Start auto-negotiation in a supported mode */ - if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { - xgbe_set_mode(pdata, XGBE_MODE_KR); - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { - xgbe_set_mode(pdata, XGBE_MODE_KX_2500); - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { - xgbe_set_mode(pdata, XGBE_MODE_KX_1000); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { - xgbe_set_mode(pdata, XGBE_MODE_SFI); - } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { - xgbe_set_mode(pdata, XGBE_MODE_X); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { - xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { - xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); - } else { - enable_irq(pdata->an_irq); - return -EINVAL; + if (set_mode) { + /* Start auto-negotiation in a supported mode */ + if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { + xgbe_set_mode(pdata, XGBE_MODE_KR); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_2500); + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_KX_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { + xgbe_set_mode(pdata, XGBE_MODE_SFI); + } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { + xgbe_set_mode(pdata, XGBE_MODE_X); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { + xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); + } else { + enable_irq(pdata->an_irq); + ret = -EINVAL; + goto out; + } } /* Disable and stop any in progress auto-negotiation */ @@ -1258,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) xgbe_an_init(pdata); xgbe_an_restart(pdata); - return 0; -} - -static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) -{ - int ret; - - mutex_lock(&pdata->an_mutex); - - ret = __xgbe_phy_config_aneg(pdata); +out: if (ret) set_bit(XGBE_LINK_ERR, &pdata->dev_state); else @@ -1278,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) return ret; } +static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) +{ + return __xgbe_phy_config_aneg(pdata, true); +} + +static int 
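Returning bool from xgbe_set_mode() is what makes the an_again path work: xgbe_phy_status_result() re-runs auto-negotiation configuration only when the mode really switched. The return-changed idiom, reduced to essentials:

#include <stdio.h>
#include <stdbool.h>

static int cur_mode;

static bool set_mode(int mode)
{
	if (mode == cur_mode)
		return false;   /* nothing to do, no side effects */
	cur_mode = mode;        /* ... switch hardware mode here ... */
	return true;
}

int main(void)
{
	bool an_again = true;   /* e.g. set by the Bel-Fuse PHY quirk */

	if (set_mode(3) && an_again)
		printf("mode changed: reconfigure AN\n");
	if (set_mode(3))
		printf("not reached: mode already 3\n");
	return 0;
}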
xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) +{ + return __xgbe_phy_config_aneg(pdata, false); +} + static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) { return (pdata->an_result == XGBE_AN_COMPLETE); @@ -1334,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; - xgbe_set_mode(pdata, mode); + if (xgbe_set_mode(pdata, mode) && pdata->an_again) + xgbe_phy_reconfig_aneg(pdata); } static void xgbe_phy_status(struct xgbe_prv_data *pdata) @@ -1639,4 +1629,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) phy_if->phy_valid_speed = xgbe_phy_valid_speed; phy_if->an_isr = xgbe_an_combined_isr; + + phy_if->module_info = xgbe_phy_module_info; + phy_if->module_eeprom = xgbe_phy_module_eeprom; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 82d1f416ee2a..7b86240ecd5f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -335,16 +335,33 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdata->awcr = XGBE_DMA_PCI_AWCR; pdata->awarcr = XGBE_DMA_PCI_AWARCR; + /* Read the port property registers */ + pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); + pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); + pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); + pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); + pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); + if (netif_msg_probe(pdata)) { + dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0); + dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1); + dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2); + dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3); + dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4); + } + /* Set the maximum channels and queues */ - reg = XP_IOREAD(pdata, XP_PROP_1); - pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); - pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); - pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); - pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); + pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_DMA); + pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_DMA); + pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_TX_QUEUES); + pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, + MAX_RX_QUEUES); if (netif_msg_probe(pdata)) { dev_dbg(dev, "max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, - pdata->tx_max_channel_count); + pdata->rx_max_channel_count); dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); } @@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) xgbe_set_counts(pdata); /* Set the maximum fifo amounts */ - reg = XP_IOREAD(pdata, XP_PROP_2); - pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE); + pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + TX_FIFO_SIZE); pdata->tx_max_fifo_size *= 16384; pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, pdata->vdata->tx_max_fifo_size); - pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); + pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, + RX_FIFO_SIZE); pdata->rx_max_fifo_size *= 16384; pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, pdata->vdata->rx_max_fifo_size); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index aac884314000..3ceb4f95ca7c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -119,6 +119,7 @@ #include <linux/kmod.h> #include <linux/mdio.h> #include <linux/phy.h> +#include <linux/ethtool.h> #include "xgbe.h" #include "xgbe-common.h" @@ -270,6 +271,15 @@ struct xgbe_sfp_eeprom { u8 vendor[32]; }; +#define XGBE_SFP_DIAGS_SUPPORTED(_x) \ + ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \ + !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) + +#define XGBE_SFP_EEPROM_BASE_LEN 256 +#define XGBE_SFP_EEPROM_DIAG_LEN 256 +#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \ + XGBE_SFP_EEPROM_DIAG_LEN) + #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " @@ -327,8 +337,6 @@ struct xgbe_phy_data { unsigned int mdio_addr; - unsigned int comm_owned; - /* SFP Support */ enum xgbe_sfp_comm sfp_comm; unsigned int sfp_mux_address; @@ -345,7 +353,6 @@ struct xgbe_phy_data { unsigned int sfp_rx_los; unsigned int sfp_tx_fault; unsigned int sfp_mod_absent; - unsigned int sfp_diags; unsigned int sfp_changed; unsigned int sfp_phy_avail; unsigned int sfp_cable_len; @@ -382,12 +389,6 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata); static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op) { - struct xgbe_phy_data *phy_data = pdata->phy_data; - - /* Be sure we own the bus */ - if (WARN_ON(!phy_data->comm_owned)) - return -EIO; - return pdata->i2c_if.i2c_xfer(pdata, i2c_op); } @@ -549,10 +550,6 @@ static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata) static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata) { - struct xgbe_phy_data *phy_data = pdata->phy_data; - - phy_data->comm_owned = 0; - mutex_unlock(&xgbe_phy_comm_lock); } @@ -562,9 +559,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) unsigned long timeout; unsigned int mutex_id; - if (phy_data->comm_owned) - return 0; - /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, * the driver needs to take the software mutex and then the hardware * mutexes before being able to use the busses. 
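The phy-v2 hunks below also retire the comm_owned flag: bus ownership is now taken and dropped around each operation, with exclusion coming entirely from xgbe_phy_comm_lock and the hardware mutex registers rather than a hand-maintained "already owned" marker that could drift out of sync with the real lock state. A userspace sketch of per-operation ownership, with a pthread mutex standing in for the driver's lock pair:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t comm_lock = PTHREAD_MUTEX_INITIALIZER;

static void bus_op(const char *what)
{
	/* take ownership for exactly one operation, then release */
	pthread_mutex_lock(&comm_lock);
	printf("bus op: %s\n", what);  /* ... program hw mutex, do I/O ... */
	pthread_mutex_unlock(&comm_lock);
}

int main(void)
{
	bus_op("read SFP eeprom");
	bus_op("mdio read");
	return 0;
}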
@@ -593,7 +587,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); - phy_data->comm_owned = 1; return 0; } @@ -867,6 +860,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return false; + if ((phy_id & 0xfffffff0) != 0x01ff0cc0) return false; @@ -892,8 +888,83 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) return true; } +static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; + unsigned int phy_id = phy_data->phydev->phy_id; + int reg; + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return false; + + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], + XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) + return false; + + /* For Bel-Fuse, use the extra AN flag */ + pdata->an_again = 1; + + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], + XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) + return false; + + if ((phy_id & 0xfffffff0) != 0x03625d10) + return false; + + /* Disable RGMII mode */ + phy_write(phy_data->phydev, 0x18, 0x7007); + reg = phy_read(phy_data->phydev, 0x18); + phy_write(phy_data->phydev, 0x18, reg & ~0x0080); + + /* Enable fiber register bank */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0001; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001); + + /* Power down SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg | 0x00800); + + /* Configure SGMII-to-Copper mode */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0006; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004); + + /* Power up SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); + + /* Enable copper register bank */ + phy_write(phy_data->phydev, 0x1c, 0x7c00); + reg = phy_read(phy_data->phydev, 0x1c); + reg &= 0x03ff; + reg &= ~0x0001; + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg); + + /* Power up SerDes */ + reg = phy_read(phy_data->phydev, 0x00); + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); + + phy_data->phydev->supported = PHY_GBIT_FEATURES; + phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phy_data->phydev->advertising = phy_data->phydev->supported; + + netif_dbg(pdata, drv, pdata->netdev, + "BelFuse PHY quirk in place\n"); + + return true; +} + static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) { + if (xgbe_phy_belfuse_phy_quirks(pdata)) + return; + if (xgbe_phy_finisar_phy_quirks(pdata)) return; } @@ -910,6 +981,9 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) if (phy_data->phydev) return 0; + /* Clear the extra AN flag */ + pdata->an_again = 0; + /* Check for the use of an external PHY */ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) return 0; @@ -1034,37 +1108,6 @@ static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) return false; } -static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) -{ - struct xgbe_phy_data *phy_data = pdata->phy_data; - struct xgbe_sfp_eeprom 
*sfp_eeprom = &phy_data->sfp_eeprom; - - if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], - XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) - return false; - - if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], - XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) { - phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; - phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; - phy_data->sfp_speed = XGBE_SFP_SPEED_1000; - if (phy_data->sfp_changed) - netif_dbg(pdata, drv, pdata->netdev, - "Bel-Fuse SFP quirk in place\n"); - return true; - } - - return false; -} - -static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata) -{ - if (xgbe_phy_belfuse_parse_quirks(pdata)) - return true; - - return false; -} - static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1083,9 +1126,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); - if (xgbe_phy_sfp_parse_quirks(pdata)) - return; - /* Assume ACTIVE cable unless told it is PASSIVE */ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; @@ -1227,13 +1267,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); - if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) { - u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG]; - - if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) - phy_data->sfp_diags = 1; - } - xgbe_phy_free_phy_device(pdata); } else { phy_data->sfp_changed = 0; @@ -1283,7 +1316,6 @@ static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data) phy_data->sfp_rx_los = 0; phy_data->sfp_tx_fault = 0; phy_data->sfp_mod_absent = 1; - phy_data->sfp_diags = 0; phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN; phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN; phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN; @@ -1326,6 +1358,130 @@ put: xgbe_phy_put_comm_ownership(pdata); } +static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX]; + struct xgbe_sfp_eeprom *sfp_eeprom; + unsigned int i, j, rem; + int ret; + + rem = eeprom->len; + + if (!eeprom->len) { + ret = -EINVAL; + goto done; + } + + if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) { + ret = -EINVAL; + goto done; + } + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { + ret = -ENXIO; + goto done; + } + + if (!netif_running(pdata->netdev)) { + ret = -EIO; + goto done; + } + + if (phy_data->sfp_mod_absent) { + ret = -EIO; + goto done; + } + + ret = xgbe_phy_get_comm_ownership(pdata); + if (ret) { + ret = -EIO; + goto done; + } + + ret = xgbe_phy_sfp_get_mux(pdata); + if (ret) { + netdev_err(pdata->netdev, "I2C error setting SFP MUX\n"); + ret = -EIO; + goto put_own; + } + + /* Read the SFP serial ID eeprom */ + eeprom_addr = 0; + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, + &eeprom_addr, sizeof(eeprom_addr), + eeprom_data, XGBE_SFP_EEPROM_BASE_LEN); + if (ret) { + netdev_err(pdata->netdev, + "I2C error reading SFP EEPROM\n"); + ret = -EIO; + goto put_mux; + } + + sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data; + + if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) { + /* Read the SFP diagnostic eeprom */ + eeprom_addr = 0; + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS, + 
&eeprom_addr, sizeof(eeprom_addr), + eeprom_data + XGBE_SFP_EEPROM_BASE_LEN, + XGBE_SFP_EEPROM_DIAG_LEN); + if (ret) { + netdev_err(pdata->netdev, + "I2C error reading SFP DIAGS\n"); + ret = -EIO; + goto put_mux; + } + } + + for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) { + if ((j >= XGBE_SFP_EEPROM_BASE_LEN) && + !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) + break; + + data[i] = eeprom_data[j]; + rem--; + } + +put_mux: + xgbe_phy_sfp_put_mux(pdata); + +put_own: + xgbe_phy_put_comm_ownership(pdata); + +done: + eeprom->len -= rem; + + return ret; +} + +static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) + return -ENXIO; + + if (!netif_running(pdata->netdev)) + return -EIO; + + if (phy_data->sfp_mod_absent) + return -EIO; + + if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + + return 0; +} + static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; @@ -1611,6 +1767,10 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, XGBE_CLR_ADV(dlks, 1000baseKX_Full); XGBE_CLR_ADV(dlks, 10000baseKR_Full); + /* Advertise FEC support is present */ + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) + XGBE_SET_ADV(dlks, 10000baseR_FEC); + switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_ADV(dlks, 10000baseKR_Full); @@ -2421,22 +2581,21 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; - - reg = XP_IOREAD(pdata, XP_PROP_3); phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + - XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); + XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_ADDR); - phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); + phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, + GPIO_MASK); - phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RX_LOS); - phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_TX_FAULT); - phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MOD_ABS); - phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, + phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RATE_SELECT); if (netif_msg_probe(pdata)) { @@ -2458,18 +2617,17 @@ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg, mux_addr_hi, mux_addr_lo; + unsigned int mux_addr_hi, mux_addr_lo; - reg = XP_IOREAD(pdata, XP_PROP_4); - - mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); - mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); + mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); + mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); if (mux_addr_lo == XGBE_SFP_DIRECT) return; phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; phy_data->sfp_mux_address = 
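xgbe_phy_module_eeprom() serves an arbitrary (offset, len) window over a 512-byte image (256 bytes of base page plus, when SFF-8472 diagnostics are present, 256 bytes of diagnostics) and shrinks eeprom->len by whatever it could not supply. The window bookkeeping on its own:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define BASE_LEN 256
#define DIAG_LEN 256

static unsigned int eeprom_window(uint8_t *dst, const uint8_t *img,
				  int have_diags,
				  unsigned int offset, unsigned int len)
{
	unsigned int limit = have_diags ? BASE_LEN + DIAG_LEN : BASE_LEN;
	unsigned int i, copied = 0;

	/* stop at the requested length or the end of the valid image */
	for (i = 0; i < len && offset + i < limit; i++, copied++)
		dst[i] = img[offset + i];
	return copied;  /* caller shrinks eeprom->len to this */
}

int main(void)
{
	uint8_t img[BASE_LEN + DIAG_LEN], out[64];

	memset(img, 0xab, sizeof(img));
	/* window straddling the end of the base page, no diagnostics */
	printf("copied %u of 64\n", eeprom_window(out, img, 0, 224, 64));
	return 0;   /* prints: copied 32 of 64 */
}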
(mux_addr_hi << 2) + mux_addr_lo; - phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); + phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, + MUX_CHAN); if (netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "SFP: mux_address=%#x\n", @@ -2592,13 +2750,11 @@ static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data) static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return 0; - reg = XP_IOREAD(pdata, XP_PROP_3); - phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); + phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); switch (phy_data->mdio_reset) { case XGBE_MDIO_RESET_NONE: case XGBE_MDIO_RESET_I2C_GPIO: @@ -2612,12 +2768,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + - XP_GET_BITS(reg, XP_PROP_3, + XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR); - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_GPIO); } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) { - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_INT_GPIO); } @@ -2707,12 +2863,9 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) { - unsigned int reg; - - reg = XP_IOREAD(pdata, XP_PROP_0); - if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) return false; - if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE)) return false; return true; @@ -2921,7 +3074,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; struct mii_bus *mii; - unsigned int reg; int ret; /* Check if enabled */ @@ -2940,12 +3092,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) return -ENOMEM; pdata->phy_data = phy_data; - reg = XP_IOREAD(pdata, XP_PROP_0); - phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); - phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); - phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); - phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); - phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR); + phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE); + phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID); + phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS); + phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE); + phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR); if (netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode); dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id); @@ -2954,12 +3105,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr); } - reg = XP_IOREAD(pdata, XP_PROP_4); - phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); - phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); - phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); - phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, 
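From this point on the driver decodes the port properties cached in pdata->pp0..pp4 at probe time instead of re-reading XP_PROP_n over MMIO for every field. XP_GET_BITS boils down to a shift and mask driven by per-field _INDEX/_WIDTH macros; a generic equivalent (the field positions below are invented purely for illustration):

#include <stdio.h>

/* extract LEN bits starting at bit POS, as XP_GET_BITS does via its
 * per-field _INDEX/_WIDTH macros */
#define GET_BITS(val, pos, len) (((val) >> (pos)) & ((1u << (len)) - 1))

int main(void)
{
	unsigned int pp4 = 0x00c40000;  /* hypothetical cached XP_PROP_4 */

	printf("redrv_present=%u redrv_addr=0x%x\n",
	       GET_BITS(pp4, 23, 1),   /* made-up 1-bit flag field */
	       GET_BITS(pp4, 16, 5));  /* made-up 5-bit address field */
	return 0;   /* prints: redrv_present=1 redrv_addr=0x4 */
}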
REDRV_LANE); - phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL); + phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT); + phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF); + phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR); + phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE); + phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL); if (phy_data->redrv && netif_msg_probe(pdata)) { dev_dbg(pdata->dev, "redrv present\n"); dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if); @@ -3231,4 +3381,7 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; phy_impl->kr_training_post = xgbe_phy_kr_training_post; + + phy_impl->module_info = xgbe_phy_module_info; + phy_impl->module_eeprom = xgbe_phy_module_eeprom; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 95d4b56448c6..47bcbcf58048 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -144,6 +144,11 @@ #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) #define XGBE_RX_DESC_CNT 512 +#define XGBE_TX_DESC_CNT_MIN 64 +#define XGBE_TX_DESC_CNT_MAX 4096 +#define XGBE_RX_DESC_CNT_MIN 64 +#define XGBE_RX_DESC_CNT_MAX 4096 + #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) /* Descriptors required for maximum contiguous TSO/GSO packet */ @@ -835,6 +840,7 @@ struct xgbe_hw_if { * Optional routines: * an_pre, an_post * kr_training_pre, kr_training_post + * module_info, module_eeprom */ struct xgbe_phy_impl_if { /* Perform Setup/teardown actions */ @@ -883,6 +889,12 @@ struct xgbe_phy_impl_if { /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); + + /* SFP module related info */ + int (*module_info)(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data); }; struct xgbe_phy_if { @@ -905,6 +917,12 @@ struct xgbe_phy_if { /* For single interrupt support */ irqreturn_t (*an_isr)(struct xgbe_prv_data *); + /* For ethtool PHY support */ + int (*module_info)(struct xgbe_prv_data *pdata, + struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct xgbe_prv_data *pdata, + struct ethtool_eeprom *eeprom, u8 *data); + /* PHY implementation specific services */ struct xgbe_phy_impl_if phy_impl; }; @@ -1027,6 +1045,13 @@ struct xgbe_prv_data { void __iomem *xprop_regs; /* XGBE property registers */ void __iomem *xi2c_regs; /* XGBE I2C CSRs */ + /* Port property registers */ + unsigned int pp0; + unsigned int pp1; + unsigned int pp2; + unsigned int pp3; + unsigned int pp4; + /* Overall device lock */ spinlock_t lock; @@ -1097,6 +1122,9 @@ struct xgbe_prv_data { unsigned int rx_ring_count; unsigned int rx_desc_count; + unsigned int new_tx_ring_count; + unsigned int new_rx_ring_count; + unsigned int tx_max_q_count; unsigned int rx_max_q_count; unsigned int tx_q_count; @@ -1233,6 +1261,7 @@ struct xgbe_prv_data { enum xgbe_rx kr_state; enum xgbe_rx kx_state; struct work_struct an_work; + unsigned int an_again; unsigned int an_supported; unsigned int parallel_detect; unsigned int fec_ability; @@ -1310,6 +1339,8 @@ int xgbe_powerup(struct net_device *, unsigned int); int xgbe_powerdown(struct net_device *, unsigned int); void xgbe_init_rx_coalesce(struct xgbe_prv_data *); void xgbe_init_tx_coalesce(struct 
xgbe_prv_data *); +void xgbe_restart_dev(struct xgbe_prv_data *pdata); +void xgbe_full_restart_dev(struct xgbe_prv_data *pdata); #ifdef CONFIG_DEBUG_FS void xgbe_debugfs_init(struct xgbe_prv_data *); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 8cfce95c82fc..39cd3a27fe77 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -107,7 +107,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) return 0; } -int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) { int err = 0; u32 h = 0U; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f33b25fbca63..d5fca2e5a9bc 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -654,7 +654,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, pkts = priv->rx_max_coalesced_frames; if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { - moder = net_dim_get_def_profile(priv->dim.dim.mode); + moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode); usecs = moder.usec; pkts = moder.pkts; } @@ -1064,7 +1064,7 @@ static void bcm_sysport_dim_work(struct work_struct *work) struct bcm_sysport_priv *priv = container_of(ndim, struct bcm_sysport_priv, dim); struct net_dim_cq_moder cur_profile = - net_dim_get_profile(dim->mode, dim->profile_ix); + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); dim->state = NET_DIM_START_MEASURE; @@ -1437,7 +1437,7 @@ static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) /* If DIM was enabled, re-apply default parameters */ if (dim->use_dim) { - moder = net_dim_get_def_profile(dim->dim.mode); + moder = net_dim_get_def_rx_moderation(dim->dim.mode); usecs = moder.usec; pkts = moder.pkts; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 9ffc4a8c5fc7..3853296d78c1 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -384,7 +384,7 @@ static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops, struct bnx2 *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - if (ops == NULL) + if (!ops) return -EINVAL; if (cp->drv_state & CNIC_DRV_STATE_REGD) @@ -755,13 +755,13 @@ bnx2_alloc_tx_mem(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL); - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) return -ENOMEM; txr->tx_desc_ring = dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE, &txr->tx_desc_mapping, GFP_KERNEL); - if (txr->tx_desc_ring == NULL) + if (!txr->tx_desc_ring) return -ENOMEM; } return 0; @@ -779,7 +779,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) rxr->rx_buf_ring = vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return -ENOMEM; for (j = 0; j < bp->rx_max_ring; j++) { @@ -788,7 +788,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_desc_ring[j] == NULL) + if (!rxr->rx_desc_ring[j]) return -ENOMEM; } @@ -796,7 +796,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) if (bp->rx_pg_ring_size) { rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE * 
bp->rx_max_pg_ring); - if (rxr->rx_pg_ring == NULL) + if (!rxr->rx_pg_ring) return -ENOMEM; } @@ -807,7 +807,7 @@ bnx2_alloc_rx_mem(struct bnx2 *bp) RXBD_RING_SIZE, &rxr->rx_pg_desc_mapping[j], GFP_KERNEL); - if (rxr->rx_pg_desc_ring[j] == NULL) + if (!rxr->rx_pg_desc_ring[j]) return -ENOMEM; } @@ -845,7 +845,7 @@ bnx2_alloc_stats_blk(struct net_device *dev) sizeof(struct statistics_block); status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size, &bp->status_blk_mapping, GFP_KERNEL); - if (status_blk == NULL) + if (!status_blk) return -ENOMEM; bp->status_blk = status_blk; @@ -914,7 +914,7 @@ bnx2_alloc_mem(struct bnx2 *bp) BNX2_PAGE_SIZE, &bp->ctx_blk_mapping[i], GFP_KERNEL); - if (bp->ctx_blk[i] == NULL) + if (!bp->ctx_blk[i]) goto alloc_mem_err; } } @@ -2667,7 +2667,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp) u32 val; good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL); - if (good_mbuf == NULL) + if (!good_mbuf) return -ENOMEM; BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, @@ -3225,7 +3225,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) if (len <= bp->rx_copy_thresh) { skb = netdev_alloc_skb(bp->dev, len + 6); - if (skb == NULL) { + if (!skb) { bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons, sw_ring_prod); goto next_rx; @@ -3285,7 +3285,7 @@ next_rx: sw_cons = BNX2_NEXT_RX_BD(sw_cons); sw_prod = BNX2_NEXT_RX_BD(sw_prod); - if ((rx_pkt == budget)) + if (rx_pkt == budget) break; /* Refresh hw_cons to see if there is new work */ @@ -4561,7 +4561,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (align_start || align_end) { align_buf = kmalloc(len32, GFP_KERNEL); - if (align_buf == NULL) + if (!align_buf) return -ENOMEM; if (align_start) { memcpy(align_buf, start, 4); @@ -4575,7 +4575,7 @@ bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf, if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) { flash_buffer = kmalloc(264, GFP_KERNEL); - if (flash_buffer == NULL) { + if (!flash_buffer) { rc = -ENOMEM; goto nvram_write_end; } @@ -5440,7 +5440,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct bnx2_tx_ring_info *txr = &bnapi->tx_ring; int j; - if (txr->tx_buf_ring == NULL) + if (!txr->tx_buf_ring) continue; for (j = 0; j < BNX2_TX_DESC_CNT; ) { @@ -5448,7 +5448,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) struct sk_buff *skb = tx_buf->skb; int k, last; - if (skb == NULL) { + if (!skb) { j = BNX2_NEXT_TX_BD(j); continue; } @@ -5485,14 +5485,14 @@ bnx2_free_rx_skbs(struct bnx2 *bp) struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring; int j; - if (rxr->rx_buf_ring == NULL) + if (!rxr->rx_buf_ring) return; for (j = 0; j < bp->rx_max_ring_idx; j++) { struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j]; u8 *data = rx_buf->data; - if (data == NULL) + if (!data) continue; dma_unmap_single(&bp->pdev->dev, @@ -6826,7 +6826,7 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats) { struct bnx2 *bp = netdev_priv(dev); - if (bp->stats_blk == NULL) + if (!bp->stats_blk) return; net_stats->rx_packets = @@ -7217,7 +7217,7 @@ bnx2_get_eeprom_len(struct net_device *dev) { struct bnx2 *bp = netdev_priv(dev); - if (bp->flash_info == NULL) + if (!bp->flash_info) return 0; return (int) bp->flash_size; @@ -7678,7 +7678,7 @@ bnx2_get_ethtool_stats(struct net_device *dev, u32 *temp_stats = (u32 *) bp->temp_stats_blk; u8 *stats_len_arr = NULL; - if (hw_stats == NULL) { + if (!hw_stats) { memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS); return; } @@ -8121,7 +8121,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bp->temp_stats_blk = 
kzalloc(sizeof(struct statistics_block), GFP_KERNEL); - if (bp->temp_stats_blk == NULL) { + if (!bp->temp_stats_blk) { rc = -ENOMEM; goto err_out; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 95871576ab92..8cd73ff5debc 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4962,8 +4962,13 @@ void bnx2x_tx_timeout(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); -#ifdef BNX2X_STOP_ON_ERROR + /* We want the information of the dump logged, + * but calling bnx2x_panic() would kill all chances of recovery. + */ if (!bp->panic) +#ifndef BNX2X_STOP_ON_ERROR + bnx2x_panic_dump(bp, false); +#else bnx2x_panic(); #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 7dd83d0ef0a0..22243c480a05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params, * slots for the highest priority. */ REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : - NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); + NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); /* Mapping between the CREDIT_WEIGHT registers and actual client * numbers */ diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 7c560d545c03..5a779b19d149 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o +bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f83769d8047b..dfa0839f6656 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -62,6 +62,7 @@ #include "bnxt_vfr.h" #include "bnxt_tc.h" #include "bnxt_devlink.h" +#include "bnxt_debugfs.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -2383,6 +2384,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring; + u8 qidx; ring = &txr->tx_ring_struct; @@ -2411,7 +2413,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); } - ring->queue_id = bp->q_info[j].queue_id; + qidx = bp->tc_to_qidx[j]; + ring->queue_id = bp->q_info[qidx].queue_id; if (i < bp->tx_nr_rings_xdp) continue; if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) @@ -3493,15 +3496,29 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; + /* convert timeout to usec */ + timeout *= 1000; i = 0; - tmo_count = timeout * 40; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. 
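+	 * (Worked example, assuming DFLT_HWRM_CMD_TIMEOUT is the usual
+	 * 500 ms: timeout *= 1000 gives 500000 usec; the short phase
+	 * covers 5 * 3 = 15 usec, and DIV_ROUND_UP(500000 - 15, 25) =
+	 * 20000 more loops, so tmo_count comes to roughly 20005.)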
+ */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && i++ < tmo_count) { - usleep_range(25, 40); + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); } if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { @@ -3513,25 +3530,34 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, HWRM_RESP_LEN_SFT; valid = bp->hwrm_cmd_resp_addr + len - 1; } else { + int j; + /* Check if response len is updated */ for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; if (len) break; - usleep_range(25, 40); + /* on first few passes, just barely sleep */ + if (i < DFLT_HWRM_CMD_TIMEOUT) + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + else + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); } if (i >= tmo_count) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - timeout, le16_to_cpu(req->req_type), + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len); return -1; } /* Last byte of resp contains valid bit */ valid = bp->hwrm_cmd_resp_addr + len - 1; - for (i = 0; i < 5; i++) { + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); if (*valid) @@ -3539,9 +3565,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, udelay(1); } - if (i >= 5) { + if (j >= HWRM_VALID_BIT_DELAY_USEC) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - timeout, le16_to_cpu(req->req_type), + HWRM_TOTAL_TIMEOUT(i), + le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len, *valid); return -1; } @@ -4334,26 +4361,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, mutex_unlock(&bp->hwrm_cmd_lock); if (rc || err) { - switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_L2_CMPL: - netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", - rc, err); - return -1; - - case RING_FREE_REQ_RING_TYPE_RX: - netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", - rc, err); - return -1; - - case RING_FREE_REQ_RING_TYPE_TX: - netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", - rc, err); - return -1; - - default: - netdev_err(bp->dev, "Invalid ring\n"); - return -1; - } + netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", + ring_type, rc, err); + return -EIO; } ring->fw_ring_id = ring_id; return rc; @@ -4477,23 +4487,9 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, mutex_unlock(&bp->hwrm_cmd_lock); if (rc || error_code) { - switch (ring_type) { - case RING_FREE_REQ_RING_TYPE_L2_CMPL: - netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", - rc); - return rc; - case RING_FREE_REQ_RING_TYPE_RX: - netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", - rc); - return rc; - case RING_FREE_REQ_RING_TYPE_TX: - netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", - rc); - return rc; - default: - netdev_err(bp->dev, "Invalid ring\n"); - return -1; - } + netdev_err(bp->dev, "hwrm_ring_free type %d failed. 
rc:%x err:%x\n", + ring_type, rc, error_code); + return -EIO; } return 0; } @@ -4721,6 +4717,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, cp_rings, vnics); + req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS); + req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -5309,6 +5309,7 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) for (i = 0; i < bp->max_tc; i++) { bp->q_info[i].queue_id = *qptr++; bp->q_info[i].queue_profile = *qptr++; + bp->tc_to_qidx[i] = i; } qportcfg_exit: @@ -5376,7 +5377,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) struct tm tm; time64_t now = ktime_get_real_seconds(); - if (bp->hwrm_spec_code < 0x10400) + if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || + bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; time64_to_tm(now, 0, &tm); @@ -5958,6 +5960,9 @@ static int bnxt_init_msix(struct bnxt *bp) if (total_vecs > max) total_vecs = max; + if (!total_vecs) + return 0; + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); if (!msix_ent) return -ENOMEM; @@ -6457,6 +6462,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) } mutex_unlock(&bp->hwrm_cmd_lock); + if (!BNXT_SINGLE_PF(bp)) + return 0; + diff = link_info->support_auto_speeds ^ link_info->advertising; if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { @@ -6843,6 +6851,8 @@ static void bnxt_preset_reg_win(struct bnxt *bp) } } +static int bnxt_init_dflt_ring_mode(struct bnxt *bp); + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -6850,6 +6860,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_preset_reg_win(bp); netif_carrier_off(bp->dev); if (irq_re_init) { + /* Reserve rings now if none were reserved at driver probe. */ + rc = bnxt_init_dflt_ring_mode(bp); + if (rc) { + netdev_err(bp->dev, "Failed to reserve default rings at open\n"); + return rc; + } rc = bnxt_reserve_rings(bp); if (rc) return rc; @@ -6877,6 +6893,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { @@ -6909,6 +6926,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) return 0; open_err: + bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); bnxt_del_napi(bp); @@ -7002,6 +7020,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ + bnxt_debug_dev_exit(bp); bnxt_disable_napi(bp); del_timer_sync(&bp->timer); bnxt_free_skbs(bp); @@ -7279,6 +7298,25 @@ skip_uc: return rc; } +static bool bnxt_can_reserve_rings(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* No minimum rings were provisioned by the PF. Don't + * reserve rings by default when device is down. 
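+		 * The checks below make that concrete: a VF under the new
+		 * resource manager reserves rings only if the PF guaranteed
+		 * it some (min or reserved TX rings) or if the interface is
+		 * actually being opened.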
+ */ + if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) + return true; + + if (!netif_running(bp->dev)) + return false; + } +#endif + return true; +} + /* If the chip and firmware supports RFS */ static bool bnxt_rfs_supported(struct bnxt *bp) { @@ -7295,7 +7333,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL int vnics, max_vnics, max_rss_ctxs; - if (!(bp->flags & BNXT_FLAG_MSIX_CAP)) + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp)) return false; vnics = 1 + bp->rx_nr_rings; @@ -7729,7 +7767,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) coal->coal_bufs = 30; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; - coal->idle_thresh = 25; + coal->idle_thresh = 50; coal->bufs_per_record = 2; coal->budget = 64; /* NAPI budget */ @@ -8529,6 +8567,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) { int dflt_rings, max_rx_rings, max_tx_rings, rc; + if (!bnxt_can_reserve_rings(bp)) + return 0; + if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); @@ -8574,6 +8615,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) return rc; } +static int bnxt_init_dflt_ring_mode(struct bnxt *bp) +{ + int rc; + + if (bp->tx_nr_rings) + return 0; + + rc = bnxt_set_dflt_rings(bp, true); + if (rc) { + netdev_err(bp->dev, "Not enough rings available.\n"); + return rc; + } + rc = bnxt_init_int_mode(bp); + if (rc) + return rc; + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) { + bp->flags |= BNXT_FLAG_RFS; + bp->dev->features |= NETIF_F_NTUPLE; + } + return 0; +} + int bnxt_restore_pf_fw_resources(struct bnxt *bp) { int rc; @@ -8614,8 +8678,8 @@ static int bnxt_init_mac_addr(struct bnxt *bp) memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); } else { eth_hw_addr_random(bp->dev); - rc = bnxt_approve_mac(bp, bp->dev->dev_addr); } + rc = bnxt_approve_mac(bp, bp->dev->dev_addr); #endif } return rc; @@ -9078,6 +9142,7 @@ static struct pci_driver bnxt_pci_driver = { static int __init bnxt_init(void) { + bnxt_debug_init(); return pci_register_driver(&bnxt_pci_driver); } @@ -9086,6 +9151,7 @@ static void __exit bnxt_exit(void) pci_unregister_driver(&bnxt_pci_driver); if (bnxt_pf_wq) destroy_workqueue(bnxt_pf_wq); + bnxt_debug_exit(); } module_init(bnxt_init); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3d55d3b56865..9b14eb610b9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -532,6 +532,19 @@ struct rx_tpa_end_cmp_ext { #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? 
\ + ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ + (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ + ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) + +#define HWRM_VALID_BIT_DELAY_USEC 20 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -1242,6 +1255,7 @@ struct bnxt { u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; + u8 tc_to_qidx[BNXT_MAX_QUEUE]; unsigned int current_interval; #define BNXT_TIMER_INTERVAL HZ @@ -1384,6 +1398,8 @@ struct bnxt { u16 *cfa_code_map; /* cfa_code -> vf_idx map */ u8 switch_id[8]; struct bnxt_tc_info *tc_info; + struct dentry *debugfs_pdev; + struct dentry *debugfs_dim; }; #define BNXT_RX_STATS_OFFSET(counter) \ @@ -1398,8 +1414,7 @@ struct bnxt { #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 -#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e -#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c #define SFF_MODULE_ID_SFP 0x3 #define SFF_MODULE_ID_QSFP 0xc #define SFF_MODULE_ID_QSFP_PLUS 0xd diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 3c746f2d9ed8..d5bc72cecde3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -21,6 +21,21 @@ #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB +static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) +{ + int i, j; + + for (i = 0; i < bp->max_tc; i++) { + if (bp->q_info[i].queue_id == queue_id) { + for (j = 0; j < bp->max_tc; j++) { + if (bp->tc_to_qidx[j] == i) + return j; + } + } + } + return -EINVAL; +} + static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { struct hwrm_queue_pri2cos_cfg_input req = {0}; @@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) pri2cos = &req.pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 qidx; + req.enables |= cpu_to_le32( QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); - pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id; + qidx = bp->tc_to_qidx[ets->prio_tc[i]]; + pri2cos[i] = bp->q_info[qidx].queue_id; } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); return rc; @@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; - int i, j; + int i; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 queue_id = pri2cos[i]; + int tc; - for (j = 0; j < bp->max_tc; j++) { - if (bp->q_info[j].queue_id == queue_id) { - ets->prio_tc[i] = j; - break; - } - } + tc = bnxt_queue_to_tc(bp, queue_id); + if (tc >= 0) + ets->prio_tc[i] = tc; } } mutex_unlock(&bp->hwrm_cmd_lock); @@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, void *data; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); - data = &req.unused_0; - for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) { + for (i = 0; i < max_tc; i++) { + u8 qidx; + req.enables |= cpu_to_le32( QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i); memset(&cos2bw, 0, sizeof(cos2bw)); - cos2bw.queue_id = bp->q_info[i].queue_id; + qidx = bp->tc_to_qidx[i]; + cos2bw.queue_id = bp->q_info[qidx].queue_id; if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; @@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 
100) | BW_VALUE_UNIT_PERCENT1_100); } + data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); - if (i == 0) { + if (qidx == 0) { req.queue_id0 = cos2bw.queue_id; req.unused_0 = 0; } @@ -132,66 +151,81 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { - int j; + int tc; memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4); if (i == 0) cos2bw.queue_id = resp->queue_id0; - for (j = 0; j < bp->max_tc; j++) { - if (bp->q_info[j].queue_id != cos2bw.queue_id) - continue; - if (cos2bw.tsa == - QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { - ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT; - } else { - ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS; - ets->tc_tx_bw[j] = cos2bw.bw_weight; - } + tc = bnxt_queue_to_tc(bp, cos2bw.queue_id); + if (tc < 0) + continue; + + if (cos2bw.tsa == + QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT; + } else { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS; + ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } mutex_unlock(&bp->hwrm_cmd_lock); return 0; } -static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask) +static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) { - struct hwrm_queue_cfg_input req = {0}; - int i; + unsigned long qmap = 0; + int max = bp->max_tc; + int i, j, rc; - if (netif_running(bp->dev)) - bnxt_tx_disable(bp); + /* Assign lossless TCs first */ + for (i = 0, j = 0; i < max; ) { + if (lltc_mask & (1 << i)) { + if (BNXT_LLQ(bp->q_info[j].queue_profile)) { + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + i++; + } + j++; + continue; + } + i++; + } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR); - req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE); + for (i = 0, j = 0; i < max; i++) { + if (lltc_mask & (1 << i)) + continue; + j = find_next_zero_bit(&qmap, max, j); + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + j++; + } - /* Configure lossless queues to lossy first */ - req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; - for (i = 0; i < bp->max_tc; i++) { - if (BNXT_LLQ(bp->q_info[i].queue_profile)) { - req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); - hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - bp->q_info[i].queue_profile = - QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY; + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + if (rc) { + netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc); + return rc; } } - - /* Now configure desired queues to lossless */ - req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; - for (i = 0; i < bp->max_tc; i++) { - if (lltc_mask & (1 << i)) { - req.queue_id = cpu_to_le32(bp->q_info[i].queue_id); - hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); - bp->q_info[i].queue_profile = - QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS; + if (bp->ieee_ets) { + int tc = netdev_get_num_tc(bp->dev); + + if (!tc) + tc = 1; + rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc); + if (rc) { + netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc); + return rc; + } + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets); + if (rc) { + netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc); + return rc; } } - if (netif_running(bp->dev)) - bnxt_tx_enable(bp); - return 0; } @@ -201,7 
+235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; - bool need_q_recfg = false; + bool need_q_remap = false; int rc; if (!my_ets) @@ -221,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) if (lltc_count > bp->max_lltc) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - return rc; - for (i = 0; i < bp->max_tc; i++) { if (tc_mask & (1 << i)) { - if (!BNXT_LLQ(bp->q_info[i].queue_profile)) - need_q_recfg = true; + u8 qidx = bp->tc_to_qidx[i]; + + if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) { + need_q_remap = true; + break; + } } } - if (need_q_recfg) - rc = bnxt_hwrm_queue_cfg(bp, tc_mask); + if (need_q_remap) + rc = bnxt_queue_remap(bp, tc_mask); + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); + req.flags = cpu_to_le32(pri_mask); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return rc; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c new file mode 100644 index 000000000000..94e208e9789f --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.c @@ -0,0 +1,124 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/pci.h> +#include "bnxt_hsi.h" +#include <linux/net_dim.h> +#include "bnxt.h" +#include "bnxt_debugfs.h" + +static struct dentry *bnxt_debug_mnt; + +static ssize_t debugfs_dim_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct net_dim *dim = filep->private_data; + int len; + char *buf; + + if (*ppos) + return 0; + if (!dim) + return -ENODEV; + buf = kasprintf(GFP_KERNEL, + "state = %d\n" \ + "profile_ix = %d\n" \ + "mode = %d\n" \ + "tune_state = %d\n" \ + "steps_right = %d\n" \ + "steps_left = %d\n" \ + "tired = %d\n", + dim->state, + dim->profile_ix, + dim->mode, + dim->tune_state, + dim->steps_right, + dim->steps_left, + dim->tired); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_dim_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dim_read, +}; + +static struct dentry *debugfs_dim_ring_init(struct net_dim *dim, int ring_idx, + struct dentry *dd) +{ + static char qname[16]; + + snprintf(qname, 10, "%d", ring_idx); + return debugfs_create_file(qname, 0600, dd, + dim, &debugfs_dim_fops); +} + +void bnxt_debug_dev_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct dentry *pdevf; + int i; + + bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); + if (bp->debugfs_pdev) { + pdevf = debugfs_create_dir("dim", bp->debugfs_pdev); + if (!pdevf) { + pr_err("failed to create debugfs entry %s/dim\n", + pname); + return; + } + bp->debugfs_dim = pdevf; + /* create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct 
bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) { + pdevf = debugfs_dim_ring_init(&cpr->dim, i, + bp->debugfs_dim); + if (!pdevf) + pr_err("failed to create debugfs entry %s/dim/%d\n", + pname, i); + } + } + } else { + pr_err("failed to create debugfs entry %s\n", pname); + } +} + +void bnxt_debug_dev_exit(struct bnxt *bp) +{ + if (bp) { + debugfs_remove_recursive(bp->debugfs_pdev); + bp->debugfs_pdev = NULL; + } +} + +void bnxt_debug_init(void) +{ + bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); + if (!bnxt_debug_mnt) + pr_err("failed to init bnxt_en debugfs\n"); +} + +void bnxt_debug_exit(void) +{ + debugfs_remove_recursive(bnxt_debug_mnt); +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h new file mode 100644 index 000000000000..d0bb4887acd0 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_debugfs.h @@ -0,0 +1,23 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include "bnxt_hsi.h" +#include "bnxt.h" + +#ifdef CONFIG_DEBUG_FS +void bnxt_debug_init(void); +void bnxt_debug_exit(void); +void bnxt_debug_dev_init(struct bnxt *bp); +void bnxt_debug_dev_exit(struct bnxt *bp); +#else +static inline void bnxt_debug_init(void) {} +static inline void bnxt_debug_exit(void) {} +static inline void bnxt_debug_dev_init(struct bnxt *bp) {} +static inline void bnxt_debug_dev_exit(struct bnxt *bp) {} +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c index 408dd190331e..afa97c8bb081 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c @@ -21,11 +21,11 @@ void bnxt_dim_work(struct work_struct *work) struct bnxt_napi *bnapi = container_of(cpr, struct bnxt_napi, cp_ring); - struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, - dim->profile_ix); + struct net_dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); - cpr->rx_ring_coal.coal_ticks = cur_profile.usec; - cpr->rx_ring_coal.coal_bufs = cur_profile.pkts; + cpr->rx_ring_coal.coal_ticks = cur_moder.usec; + cpr->rx_ring_coal.coal_bufs = cur_moder.pkts; bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); dim->state = NET_DIM_START_MEASURE; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8ba14ae00e8f..7270c8b0cef3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -140,6 +140,19 @@ reset_coalesce: #define BNXT_RX_STATS_EXT_ENTRY(counter) \ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } +enum { + RX_TOTAL_DISCARDS, + TX_TOTAL_DISCARDS, +}; + +static struct { + u64 counter; + char string[ETH_GSTRING_LEN]; +} bnxt_sw_func_stats[] = { + {0, "rx_total_discard_pkts"}, + {0, "tx_total_discard_pkts"}, +}; + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -237,6 +250,7 @@ static const struct { BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), }; +#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) @@ -244,6 +258,8 @@ static int 
bnxt_get_num_stats(struct bnxt *bp) { int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + num_stats += BNXT_NUM_SW_FUNC_STATS; + if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; @@ -279,6 +295,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, if (!bp->bnapi) return; + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) + bnxt_sw_func_stats[i].counter = 0; + for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -288,7 +307,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (k = 0; k < stat_fields; j++, k++) buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; + + bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += + le64_to_cpu(cpr->hw_stats->rx_discard_pkts); + bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += + le64_to_cpu(cpr->hw_stats->tx_discard_pkts); } + + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) + buf[j] = bnxt_sw_func_stats[i].counter; + if (bp->flags & BNXT_FLAG_PORT_STATS) { __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; @@ -359,6 +387,11 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) sprintf(buf, "[%d]: rx_l4_csum_errors", i); buf += ETH_GSTRING_LEN; } + for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { + strcpy(buf, bnxt_sw_func_stats[i].string); + buf += ETH_GSTRING_LEN; + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { strcpy(buf, bnxt_port_stats_arr[i].string); @@ -551,6 +584,8 @@ static int bnxt_set_channels(struct net_device *dev, * to renable */ } + } else { + rc = bnxt_reserve_rings(bp); } return rc; @@ -1785,6 +1820,11 @@ static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) static int bnxt_get_eeprom_len(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); + + if (BNXT_VF(bp)) + return 0; + /* The -1 return value allows the entire 32-bit range of offsets to be * passed via the ethtool command-line utility. 
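	 * (For example, "ethtool -e eth0 offset 0x20000 length 16" -- the
	 * device name is hypothetical -- would fail the ethtool core's
	 * bounds check if a finite eeprom_len were reported instead.)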
*/ @@ -2144,9 +2184,8 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, static int bnxt_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { + u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; struct bnxt *bp = netdev_priv(dev); - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; int rc; /* No point in going further if phy status indicates @@ -2161,21 +2200,19 @@ static int bnxt_get_module_info(struct net_device *dev, if (bp->hwrm_spec_code < 0x10202) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = I2C_DEV_ADDR_A0; - req.page_number = 0; - req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR); - req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE; - req.port_id = cpu_to_le16(bp->pf.port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, + SFF_DIAG_SUPPORT_OFFSET + 1, + data); if (!rc) { - u32 module_id = le32_to_cpu(output->data[0]); + u8 module_id = data[0]; + u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; switch (module_id) { case SFF_MODULE_ID_SFP: modinfo->type = ETH_MODULE_SFF_8472; modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; break; case SFF_MODULE_ID_QSFP: case SFF_MODULE_ID_QSFP_PLUS: @@ -2191,7 +2228,6 @@ static int bnxt_get_module_info(struct net_device *dev, break; } } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f952963d594e..a64910892c25 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) vf_vnics = hw_resc->max_vnics - bp->nr_vnics; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); - req.min_rsscos_ctx = cpu_to_le16(1); - req.max_rsscos_ctx = cpu_to_le16(1); + req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) { req.min_cmpl_rings = cpu_to_le16(1); req.min_tx_rings = cpu_to_le16(1); req.min_rx_rings = cpu_to_le16(1); - req.min_l2_ctxs = cpu_to_le16(1); + req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX); req.min_vnics = cpu_to_le16(1); req.min_stat_ctx = cpu_to_le16(1); req.min_hw_ring_grps = cpu_to_le16(1); @@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); req.min_tx_rings = cpu_to_le16(vf_tx_rings); req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(4); + req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req.min_vnics = cpu_to_le16(vf_vnics); req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); @@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); req.max_tx_rings = cpu_to_le16(vf_tx_rings); req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(4); + req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); req.max_vnics = cpu_to_le16(vf_vnics); req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); @@ -809,6 +809,9 @@ static int 
bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_fwd_resp_input req = {0}; struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); /* Set the new target id */ @@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_reject_fwd_resp_input req = {0}; struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); @@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, struct hwrm_exec_fwd_resp_input req = {0}; struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) + return -EINVAL; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); /* Set the new target id */ req.target_id = cpu_to_le16(vf->fw_fid); @@ -914,7 +923,8 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { if (is_valid_ether_addr(req->dflt_mac_addr) && ((vf->flags & BNXT_VF_TRUST) || - (!is_valid_ether_addr(vf->mac_addr)))) { + !is_valid_ether_addr(vf->mac_addr) || + ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index d10f6f6c7860..e9b20cd19881 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -11,6 +11,23 @@ #ifndef BNXT_SRIOV_H #define BNXT_SRIOV_H +#define BNXT_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \ + sizeof(struct hwrm_fwd_resp_input)) + +#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\ + offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id)) + +#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\ + offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id)) + +#define BNXT_VF_MIN_RSS_CTX 1 +#define BNXT_VF_MAX_RSS_CTX 1 +#define BNXT_VF_MIN_L2_CTX 1 +#define BNXT_VF_MAX_L2_CTX 4 + int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); int bnxt_set_vf_mac(struct net_device *, int, u8 *); int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 1389ab5e05df..1f0e872d0667 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -113,10 +113,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, if (tx_avail != bp->tx_ring_size) *event &= ~BNXT_RX_EVENT; + *len = xdp.data_end - xdp.data; if (orig_data != xdp.data) { offset = xdp.data - xdp.data_hard_start; *data_ptr = xdp.data_hard_start + offset; - *len = xdp.data_end - xdp.data; } switch (act) { case XDP_PASS: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 0445f2c0c629..20c1681bb1af 100644 --- 
a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -652,7 +652,7 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, pkts = ring->rx_max_coalesced_frames; if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { - moder = net_dim_get_def_profile(ring->dim.dim.mode); + moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode); usecs = moder.usec; pkts = moder.pkts; } @@ -1925,7 +1925,7 @@ static void bcmgenet_dim_work(struct work_struct *work) struct bcmgenet_rx_ring *ring = container_of(ndim, struct bcmgenet_rx_ring, dim); struct net_dim_cq_moder cur_profile = - net_dim_get_profile(dim->mode, dim->profile_ix); + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); dim->state = NET_DIM_START_MEASURE; @@ -2102,7 +2102,7 @@ static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) /* If DIM was enabled, re-apply default parameters */ if (dim->use_dim) { - moder = net_dim_get_def_profile(dim->dim.mode); + moder = net_dim_get_def_rx_moderation(dim->dim.mode); usecs = moder.usec; pkts = moder.pkts; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index b4c9268100bb..3e93df5d4e3b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -591,16 +591,10 @@ static int macb_mii_init(struct macb *bp) dev_set_drvdata(&bp->dev->dev, bp->mii_bus); np = bp->pdev->dev.of_node; + if (pdata) + bp->mii_bus->phy_mask = pdata->phy_mask; - if (np) { - err = of_mdiobus_register(bp->mii_bus, np); - } else { - if (pdata) - bp->mii_bus->phy_mask = pdata->phy_mask; - - err = mdiobus_register(bp->mii_bus); - } - + err = of_mdiobus_register(bp->mii_bus, np); if (err) goto err_out_free_mdiobus; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index e8b290473ee2..929d485a3a2f 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1245,7 +1245,7 @@ static void cn23xx_setup_reg_address(struct octeon_device *oct) CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); } -static int cn23xx_sriov_config(struct octeon_device *oct) +int cn23xx_sriov_config(struct octeon_device *oct) { struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; u32 max_rings, total_rings, max_vfs, rings_per_vf; @@ -1269,8 +1269,8 @@ static int cn23xx_sriov_config(struct octeon_device *oct) break; } - if (max_rings <= num_present_cpus()) - num_pf_rings = 1; + if (oct->sriov_info.num_pf_rings) + num_pf_rings = oct->sriov_info.num_pf_rings; else num_pf_rings = num_present_cpus(); @@ -1497,3 +1497,57 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, octeon_mbox_write(oct, &mbox_cmd); } } + +static void +cn23xx_get_vf_stats_callback(struct octeon_device *oct, + struct octeon_mbox_cmd *cmd, void *arg) +{ + struct oct_vf_stats_ctx *ctx = arg; + + memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats)); + atomic_set(&ctx->status, 1); +} + +int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx, + struct oct_vf_stats *stats) +{ + u32 timeout = HZ; // 1sec + struct octeon_mbox_cmd mbox_cmd; + struct oct_vf_stats_ctx ctx; + u32 count = 0, ret; + + if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx))) + return -1; + + if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data)) + return -1; + + 
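/* One-word mailbox request, posted on the VF's first ring
+	 * (vfidx * rings_per_vf); cn23xx_get_vf_stats_callback() above
+	 * copies the reply into ctx->stats and sets ctx->status.
+	 */
+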
mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback; + ctx.stats = stats; + atomic_set(&ctx.status, 0); + mbox_cmd.fn_arg = (void *)&ctx; + memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data)); + octeon_mbox_write(oct, &mbox_cmd); + + do { + schedule_timeout_uninterruptible(1); + } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout)); + + ret = atomic_read(&ctx.status); + if (ret == 0) { + octeon_mbox_cancel(oct, 0); + dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n", + vfidx); + return -1; + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index 2aba5247b6d8..e6f31d0d5c0b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -43,6 +43,15 @@ struct octeon_cn23xx_pf { #define CN23XX_SLI_DEF_BP 0x40 +struct oct_vf_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 broadcast; + u64 multicast; +}; + int setup_cn23xx_octeon_pf_device(struct octeon_device *oct); int validate_cn23xx_pf_config_info(struct octeon_device *oct, @@ -52,8 +61,13 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); +int cn23xx_sriov_config(struct octeon_device *oct); + int cn23xx_fw_loaded(struct octeon_device *oct); void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, u8 *mac); + +int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx, + struct oct_vf_stats *stats); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 2a94eee943b2..8093c5eafea2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -29,6 +29,162 @@ /* OOM task polling interval */ #define LIO_OOM_POLL_INTERVAL_MS 250 +#define OCTNIC_MAX_SG MAX_SKB_FRAGS + +/** + * \brief Callback for getting interface configuration + * @param status status of request + * @param buf pointer to resp structure + */ +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct liquidio_if_cfg_context *ctx; + struct liquidio_if_cfg_resp *resp; + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (resp->status) + dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", + CVM_CAST64(resp->status)); + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + /* This barrier is required to be sure that the response has been + * written fully before waking up the handler + */ + wmb(); + + wake_up_interruptible(&ctx->wc); +} + +/** + * \brief Delete gather lists + * @param lio per-network private data + */ +void lio_delete_glists(struct lio *lio) +{ + struct octnic_gather *g; + int i; + + kfree(lio->glist_lock); + lio->glist_lock = NULL; + + if (!lio->glist) + return; + + for (i = 0; i < lio->oct_dev->num_iqs; i++) { + do { + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[i]); + kfree(g); + } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } + } + + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + + kfree(lio->glist); + lio->glist = NULL; +} + +/** + * \brief Setup gather lists + * @param lio per-network private data + */ +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) +{ + struct octnic_gather *g; + int i, j; + + lio->glist_lock = + kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); + if (!lio->glist_lock) + return -ENOMEM; + + lio->glist = + kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); + if (!lio->glist) { + kfree(lio->glist_lock); + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (i = 0; i < num_iqs; i++) { + int numa_node = dev_to_node(&oct->pci_dev->dev); + + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + lio->glists_virt_base[i] = + lio_dma_alloc(oct, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); + + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); + + list_add_tail(&g->list, &lio->glist[i]); + } + + if (j != lio->tx_qsize) { + lio_delete_glists(lio); + return -ENOMEM; + } + } + + return 0; +} + int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) { struct lio *lio = GET_LIO(netdev); @@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) int num_ioq_vectors; int irqret, err; - oct->num_msix_irqs = num_ioqs; if (oct->msix_on) { + oct->num_msix_irqs = num_ioqs; if (OCTEON_CN23XX_PF(oct)) { num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; @@ -1169,3 +1325,355 @@ int lio_wait_for_clean_oq(struct octeon_device *oct) return pending_pkts; } + +static void +octnet_nic_stats_callback(struct octeon_device 
*oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_nic_stats_resp *resp = + (struct oct_nic_stats_resp *)sc->virtrptr; + struct oct_nic_stats_ctrl *ctrl = + (struct oct_nic_stats_ctrl *)sc->ctxptr; + struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; + struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; + struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; + struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; + + if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->stats, + (sizeof(struct oct_link_stats)) >> 3); + + /* RX link-level stats */ + rstats->total_rcvd = rsp_rstats->total_rcvd; + rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; + rstats->total_bcst = rsp_rstats->total_bcst; + rstats->total_mcst = rsp_rstats->total_mcst; + rstats->runts = rsp_rstats->runts; + rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; + /* Accounts for over/under-run of buffers */ + rstats->fifo_err = rsp_rstats->fifo_err; + rstats->dmac_drop = rsp_rstats->dmac_drop; + rstats->fcs_err = rsp_rstats->fcs_err; + rstats->jabber_err = rsp_rstats->jabber_err; + rstats->l2_err = rsp_rstats->l2_err; + rstats->frame_err = rsp_rstats->frame_err; + rstats->red_drops = rsp_rstats->red_drops; + + /* RX firmware stats */ + rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; + rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_total_mcast = rsp_rstats->fw_total_mcast; + rstats->fw_total_bcast = rsp_rstats->fw_total_bcast; + rstats->fw_err_pko = rsp_rstats->fw_err_pko; + rstats->fw_err_link = rsp_rstats->fw_err_link; + rstats->fw_err_drop = rsp_rstats->fw_err_drop; + rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; + rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; + + /* Number of packets that are LROed */ + rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; + /* Number of octets that are LROed */ + rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; + /* Number of LRO packets formed */ + rstats->fw_total_lro = rsp_rstats->fw_total_lro; + /* Number of times lRO of packet aborted */ + rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; + rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; + rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; + rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; + rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + rstats->fwd_rate = rsp_rstats->fwd_rate; + + /* TX link-level stats */ + tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; + tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; + tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; + tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; + tstats->ctl_sent = rsp_tstats->ctl_sent; + /* Packets sent after one collision*/ + tstats->one_collision_sent = rsp_tstats->one_collision_sent; + /* Packets sent after multiple collision*/ + tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; + /* Packets not sent due to max collisions */ + tstats->max_collision_fail = rsp_tstats->max_collision_fail; + /* Packets not sent due to max deferrals */ + tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; + /* Accounts for over/under-run of buffers */ + tstats->fifo_err = rsp_tstats->fifo_err; + tstats->runts = rsp_tstats->runts; + /* Total number of collisions detected */ + tstats->total_collisions = rsp_tstats->total_collisions; + + /* firmware stats */ + tstats->fw_total_sent = rsp_tstats->fw_total_sent; + 
tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent; + tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent; + tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; + tstats->fw_err_link = rsp_tstats->fw_err_link; + tstats->fw_err_drop = rsp_tstats->fw_err_drop; + tstats->fw_tso = rsp_tstats->fw_tso; + tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; + tstats->fw_err_tso = rsp_tstats->fw_err_tso; + tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; + + resp->status = 1; + } else { + resp->status = -1; + } + complete(&ctrl->complete); +} + +int octnet_get_link_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_ctrl *ctrl; + struct oct_nic_stats_resp *resp; + int retval; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + sizeof(struct octnic_ctrl_pkt)); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; + memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); + ctrl->netdev = netdev; + init_completion(&ctrl->complete); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + sc->callback = octnet_nic_stats_callback; + sc->callback_arg = sc; + sc->wait_time = 500; /*in milli seconds*/ + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + + if (resp->status != 1) { + octeon_free_soft_command(oct_dev, sc); + + return -EINVAL; + } + + octeon_free_soft_command(oct_dev, sc); + + return 0; +} + +static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct, + u32 status, + void *buf) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_soft_command *sc = buf; + + ctx = sc->ctxptr; + + oct = lio_get_device(ctx->octeon_id); + if (status) { + dev_err(&oct->pci_dev->dev, "%s: instruction failed. 
Status: %llx\n", + __func__, + CVM_CAST64(status)); + } + ctx->status = status; + complete(&ctx->complete); +} + +int liquidio_set_speed(struct lio *lio, int speed) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + u32 ctx_size; + int retval; + u32 var; + + if (oct->speed_setting == speed) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n", + __func__); + return -EOPNOTSUPP; + } + + ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + ctx_size); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + ctx = sc->ctxptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + ctx->octeon_id = lio_get_device_id(oct); + ctx->status = 0; + init_completion(&ctx->complete); + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_SET; + ncmd->s.param1 = speed; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + sc->callback = liquidio_nic_seapi_ctl_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + retval = -EBUSY; + } else { + /* Wait for response or timeout */ + if (wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies(10000)) == 0) { + dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", + __func__); + octeon_free_soft_command(oct, sc); + return -EINTR; + } + + retval = resp->status; + + if (retval) { + dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", + __func__, retval); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + var = be32_to_cpu((__force __be32)resp->speed); + if (var != speed) { + dev_err(&oct->pci_dev->dev, + "%s: setting failed speed= %x, expect %x\n", + __func__, var, speed); + } + + oct->speed_setting = var; + } + + octeon_free_soft_command(oct, sc); + + return retval; +} + +int liquidio_get_speed(struct lio *lio) +{ + struct liquidio_nic_seapi_ctl_context *ctx; + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + u32 ctx_size; + int retval; + + ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + ctx_size); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + ctx = sc->ctxptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + ctx->octeon_id = lio_get_device_id(oct); + ctx->status = 0; + init_completion(&ctx->complete); + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + sc->callback = liquidio_nic_seapi_ctl_callback; + sc->callback_arg = sc; + sc->wait_time = 5000; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + oct->no_speed_setting = 1; + oct->speed_setting = 25; + + retval = -EBUSY; + } else { + if 
(wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies(10000)) == 0) { + dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", + __func__); + + oct->speed_setting = 25; + oct->no_speed_setting = 1; + + octeon_free_soft_command(oct, sc); + + return -EINTR; + } + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, + "%s failed retval=%d\n", __func__, retval); + oct->no_speed_setting = 1; + oct->speed_setting = 25; + octeon_free_soft_command(oct, sc); + retval = -EIO; + } else { + u32 var; + + var = be32_to_cpu((__force __be32)resp->speed); + oct->speed_setting = var; + if (var == 0xffff) { + oct->no_speed_setting = 1; + /* unable to access boot variables + * get the default value based on the NIC type + */ + oct->speed_setting = 25; + } + } + } + + octeon_free_soft_command(oct, sc); + + return retval; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 550ac29682a5..06f7449c569d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -32,7 +32,6 @@ #include "cn23xx_vf_device.h" static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); -static int octnet_get_link_stats(struct net_device *netdev); struct oct_intrmod_context { int octeon_id; @@ -96,11 +95,9 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_bytes", "tx_bytes", - "rx_errors", /*jabber_err+l2_err+frame_err */ - "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ - "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + - *st->fromwire.dmac_drop + st->fromwire.fw_err_drop - */ + "rx_errors", + "tx_errors", + "rx_dropped", "tx_dropped", "tx_total_sent", @@ -115,14 +112,17 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_tso_err", "tx_vxlan", + "tx_mcast", + "tx_bcast", + "mac_tx_total_pkts", "mac_tx_total_bytes", "mac_tx_mcast_pkts", "mac_tx_bcast_pkts", - "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ + "mac_tx_ctl_packets", "mac_tx_total_collisions", "mac_tx_one_collision", - "mac_tx_multi_collison", + "mac_tx_multi_collision", "mac_tx_max_collision_fail", "mac_tx_max_deferal_fail", "mac_tx_fifo_err", @@ -130,6 +130,8 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "rx_total_rcvd", "rx_total_fwd", + "rx_mcast", + "rx_bcast", "rx_jabber_err", "rx_l2_err", "rx_frame_err", @@ -170,17 +172,21 @@ static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { "tx_packets", "rx_bytes", "tx_bytes", - "rx_errors", /* jabber_err + l2_err+frame_err */ - "tx_errors", /* fw_err_pko + fw_err_link+fw_err_drop */ - "rx_dropped", /* total_rcvd - fw_total_rcvd + dmac_drop + fw_err_drop */ + "rx_errors", + "tx_errors", + "rx_dropped", "tx_dropped", + "rx_mcast", + "tx_mcast", + "rx_bcast", + "tx_bcast", "link_state_changes", }; /* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ - "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ + "packets", + "bytes", "dropped", "iq_busy", "sgentry_sent", @@ -197,13 +203,9 @@ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { /* statistics of host rx queue */ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ - "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ - "dropped", /*oct->droq[oq_no]->stats.rx_dropped+ - *oct->droq[oq_no]->stats.dropped_nodispatch+ - 
*oct->droq[oq_no]->stats.dropped_toomany+ - *oct->droq[oq_no]->stats.dropped_nomem - */ + "packets", + "bytes", + "dropped", "dropped_nomem", "dropped_toomany", "fw_dropped", @@ -228,46 +230,147 @@ static int lio_get_link_ksettings(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; - u32 supported = 0, advertising = 0; linfo = &lio->linfo; + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + switch (linfo->link.s.phy_type) { case LIO_PHY_PORT_TP: ecmd->base.port = PORT_TP; - supported = (SUPPORTED_10000baseT_Full | - SUPPORTED_TP | SUPPORTED_Pause); - advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + break; case LIO_PHY_PORT_FIBRE: - ecmd->base.port = PORT_FIBRE; - - if (linfo->link.s.speed == SPEED_10000) { - supported = SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_10000baseT_Full; + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + } else { + dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", + linfo->link.s.if_mode); } - supported |= SUPPORTED_FIBRE | SUPPORTED_Pause; - advertising |= ADVERTISED_Pause; + ecmd->base.port = PORT_FIBRE; ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (OCTEON_CN23XX_PF(oct)) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseCR_Full); + + if (oct->no_speed_setting == 0) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + } + + if (oct->no_speed_setting == 0) + liquidio_get_speed(lio); + else + oct->speed_setting = 25; + + if (oct->speed_setting == 10) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + if (oct->speed_setting == 25) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + } else { /* VF */ + if (linfo->link.s.speed == 10000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 
10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + + if (linfo->link.s.speed == 25000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + } + } else { + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + } break; } - if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || - linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || - linfo->link.s.if_mode == INTERFACE_MODE_XFI) { - ethtool_convert_legacy_u32_to_link_mode( - ecmd->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode( - ecmd->link_modes.advertising, advertising); - } else { - dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", - linfo->link.s.if_mode); - } - if (linfo->link.s.link_up) { ecmd->base.speed = linfo->link.s.speed; ecmd->base.duplex = linfo->link.s.duplex; @@ -279,6 +382,51 @@ static int lio_get_link_ksettings(struct net_device *netdev, return 0; } +static int lio_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) +{ + const int speed = ecmd->base.speed; + struct lio *lio = GET_LIO(netdev); + struct oct_link_info *linfo; + struct octeon_device *oct; + u32 is25G = 0; + + oct = lio->oct_dev; + + linfo = &lio->linfo; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + is25G = 1; + } else { + return -EOPNOTSUPP; + } + + if (oct->no_speed_setting) { + dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", + __func__); + return -EOPNOTSUPP; + } + + if ((ecmd->base.duplex != DUPLEX_UNKNOWN && + ecmd->base.duplex != linfo->link.s.duplex) || + ecmd->base.autoneg != AUTONEG_DISABLE || + (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && + ecmd->base.speed != SPEED_UNKNOWN)) + return -EOPNOTSUPP; + + if ((oct->speed_boot == speed / 1000) && + oct->speed_boot == oct->speed_setting) + return 0; + + liquidio_set_speed(lio, speed / 1000); + + dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", + oct->speed_setting); + + return 0; +} + static void lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -359,7 +507,14 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - max_combined = lio->linfo.num_txpciq; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, cn23xx_pf); + + max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); + } combined_count = oct->num_iqs; } 
else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; @@ -423,9 +578,15 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; + + if (octeon_allocate_ioq_vector(oct, num_ioqs)) { + dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); + return -1; + } + if (octeon_setup_interrupt(oct, num_ioqs)) { dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); - return 1; + return -1; } /* Enable Octeon device interrupts */ @@ -455,7 +616,16 @@ lio_ethtool_set_channels(struct net_device *dev, combined_count = channel->combined_count; if (OCTEON_CN23XX_PF(oct)) { - max_combined = channel->max_combined; + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, + cn23xx_pf); + + max_combined = + CFG_GET_IQ_MAX_Q(conf23_pf); + } } else if (OCTEON_CN23XX_VF(oct)) { u64 reg_val = 0ULL; u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); @@ -483,7 +653,6 @@ lio_ethtool_set_channels(struct net_device *dev, if (lio_reset_queues(dev, combined_count)) return -EINVAL; - lio_irq_reallocate_irqs(oct, combined_count); if (stopped) dev->netdev_ops->ndo_open(dev); @@ -822,12 +991,120 @@ lio_ethtool_get_ringparam(struct net_device *netdev, ering->rx_jumbo_max_pending = 0; } +static int lio_23xx_reconfigure_queue_count(struct lio *lio) +{ + struct octeon_device *oct = lio->oct_dev; + struct liquidio_if_cfg_context *ctx; + u32 resp_size, ctx_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct lio_version *vdata; + u32 ifidx_or_pfnum; + int retval; + int j; + + resp_size = sizeof(struct liquidio_if_cfg_resp); + ctx_size = sizeof(struct liquidio_if_cfg_context); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, data_size, + resp_size, ctx_size); + if (!sc) { + dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", + __func__); + return -1; + } + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + vdata = (struct lio_version *)sc->virtdptr; + + vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + ifidx_or_pfnum = oct->pf_num; + WRITE_ONCE(ctx->cond, 0); + ctx->octeon_id = lio_get_device_id(oct); + init_waitqueue_head(&ctx->wc); + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.base_queue = oct->sriov_info.pf_srn; + if_cfg.s.gmx_port_id = oct->pf_num; + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_QCOUNT_UPDATE, 0, + if_cfg.u64, 0); + sc->callback = lio_if_cfg_callback; + sc->callback_arg = sc; + sc->wait_time = LIO_IFCFG_WAIT_TIME; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "iq/oq config failed status: %x\n", + retval); + goto qcount_update_fail; + } + + if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { + dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); + return -1; + } + + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); + goto qcount_update_fail; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct 
liquidio_if_cfg_info)) >> 3); + + lio->ifidx = ifidx_or_pfnum; + lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); + lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); + for (j = 0; j < lio->linfo.num_rxpciq; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + + for (j = 0; j < lio->linfo.num_txpciq; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + + octeon_free_soft_command(oct, sc); + dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", + lio->linfo.num_rxpciq); + + return 0; + +qcount_update_fail: + octeon_free_soft_command(oct, sc); + + return -1; +} + static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + int i, queue_count_update = 0; struct napi_struct *napi, *n; - int i, update = 0; + int ret; + + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); @@ -836,7 +1113,7 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); if (octeon_set_io_queues_off(oct)) { - dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); return -1; } @@ -849,9 +1126,40 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) netif_napi_del(napi); if (num_qs != oct->num_iqs) { - netif_set_real_num_rx_queues(netdev, num_qs); - netif_set_real_num_tx_queues(netdev, num_qs); - update = 1; + ret = netif_set_real_num_rx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number rx failed\n"); + return ret; + } + + ret = netif_set_real_num_tx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number tx failed\n"); + return ret; + } + + /* The value of queue_count_update decides whether it is the + * queue count or the descriptor count that is being + * re-configured. + */ + queue_count_update = 1; + } + + /* Re-configuration of queues can happen in two scenarios, SRIOV enabled + * and SRIOV disabled. Few things like recreating queue zero, resetting + * glists and IRQs are required for both. For the latter, some more + * steps like updating sriov_info for the octeon device need to be done. + */ + if (queue_count_update) { + lio_delete_glists(lio); + + /* Delete mbox for PF which is SRIOV disabled because sriov_info + * will be now changed. 
+ */ + if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) + oct->fn_list.free_mbox(oct); } for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { @@ -866,24 +1174,91 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) octeon_delete_instr_queue(oct, i); } + if (queue_count_update) { + /* For PF re-configure sriov related information */ + if ((OCTEON_CN23XX_PF(oct)) && + !oct->sriov_info.sriov_enabled) { + oct->sriov_info.num_pf_rings = num_qs; + if (cn23xx_sriov_config(oct)) { + dev_err(&oct->pci_dev->dev, + "Queue reset aborted: SRIOV config failed\n"); + return -1; + } + + num_qs = oct->sriov_info.num_pf_rings; + } + } + if (oct->fn_list.setup_device_regs(oct)) { dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); return -1; } - if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { - dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); - return -1; + /* The following are needed in case of queue count re-configuration and + * not for descriptor count re-configuration. + */ + if (queue_count_update) { + if (octeon_setup_instr_queues(oct)) + return -1; + + if (octeon_setup_output_queues(oct)) + return -1; + + /* Recreating mbox for PF that is SRIOV disabled */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return -1; + } + } + + /* Deleting and recreating IRQs whether the interface is SRIOV + * enabled or disabled. + */ + if (lio_irq_reallocate_irqs(oct, num_qs)) { + dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); + return -1; + } + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->droq[i]->max_count, + oct->droq[i]->pkts_credit_reg); + + /* Informing firmware about the new queue count. It is required + * for firmware to allocate more number of queues than those at + * load time. + */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (lio_23xx_reconfigure_queue_count(lio)) + return -1; + } } - /* Enable the input and output queues for this Octeon device */ - if (oct->fn_list.enable_io_queues(oct)) { - dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); + /* Once firmware is aware of the new value, queues can be recreated */ + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { + dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); return -1; } - if (update && lio_send_queue_count_update(netdev, num_qs)) - return -1; + if (queue_count_update) { + if (lio_setup_glists(oct, lio, num_qs)) { + dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); + return -1; + } + + /* Send firmware the information about new number of queues + * if the interface is a VF or a PF that is SRIOV enabled. 
+ */ + if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) + if (lio_send_queue_count_update(netdev, num_qs)) + return -1; + } return 0; } @@ -928,7 +1303,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev, CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, rx_count); - if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) + if (lio_reset_queues(netdev, oct->num_iqs)) goto err_lio_reset_queues; if (stopped) @@ -1063,33 +1438,48 @@ lio_get_ethtool_stats(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; - struct net_device_stats *netstats = &netdev->stats; + struct rtnl_link_stats64 lstats; int i = 0, j; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return; - netdev->netdev_ops->ndo_get_stats(netdev); - octnet_get_link_stats(netdev); - + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ - data[i++] = CVM_CAST64(netstats->rx_packets); + data[i++] = lstats.rx_packets; /*sum of oct->instr_queue[iq_no]->stats.tx_done */ - data[i++] = CVM_CAST64(netstats->tx_packets); + data[i++] = lstats.tx_packets; /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ - data[i++] = CVM_CAST64(netstats->rx_bytes); + data[i++] = lstats.rx_bytes; /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ - data[i++] = CVM_CAST64(netstats->tx_bytes); - data[i++] = CVM_CAST64(netstats->rx_errors); - data[i++] = CVM_CAST64(netstats->tx_errors); + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors + + oct_dev->link_stats.fromwire.fcs_err + + oct_dev->link_stats.fromwire.jabber_err + + oct_dev->link_stats.fromwire.l2_err + + oct_dev->link_stats.fromwire.frame_err; + data[i++] = lstats.tx_errors; /*sum of oct->droq[oq_no]->stats->rx_dropped + *oct->droq[oq_no]->stats->dropped_nodispatch + *oct->droq[oq_no]->stats->dropped_toomany + *oct->droq[oq_no]->stats->dropped_nomem */ - data[i++] = CVM_CAST64(netstats->rx_dropped); + data[i++] = lstats.rx_dropped + + oct_dev->link_stats.fromwire.fifo_err + + oct_dev->link_stats.fromwire.dmac_drop + + oct_dev->link_stats.fromwire.red_drops + + oct_dev->link_stats.fromwire.fw_err_pko + + oct_dev->link_stats.fromwire.fw_err_link + + oct_dev->link_stats.fromwire.fw_err_drop; /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = CVM_CAST64(netstats->tx_dropped); + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.max_collision_fail + + oct_dev->link_stats.fromhost.max_deferral_fail + + oct_dev->link_stats.fromhost.total_collisions + + oct_dev->link_stats.fromhost.fw_err_pko + + oct_dev->link_stats.fromhost.fw_err_link + + oct_dev->link_stats.fromhost.fw_err_drop + + oct_dev->link_stats.fromhost.fw_err_pki; /* firmware tx stats */ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. 
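Note on the lio_get_ethtool_stats() hunks above: they drop the legacy net_device_stats copy and instead fold the MAC- and firmware-level counters from oct_dev->link_stats directly into the rx_errors, tx_errors, rx_dropped and tx_dropped totals obtained via ndo_get_stats64(). A minimal user-space sketch of that aggregation follows, for illustration only; the rx_link_stats struct is a stand-in carrying just the fields named in the hunk, and main() with its sample numbers is an assumption, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the RX half of oct_dev->link_stats (fields from the hunk). */
struct rx_link_stats {
	uint64_t fcs_err, jabber_err, l2_err, frame_err;
	uint64_t fifo_err, dmac_drop, red_drops;
	uint64_t fw_err_pko, fw_err_link, fw_err_drop;
};

/* rx_errors as computed in the patched lio_get_ethtool_stats():
 * per-queue errors plus the link-level receive error counters.
 */
static uint64_t rx_errors_total(uint64_t queue_errors,
				const struct rx_link_stats *s)
{
	return queue_errors + s->fcs_err + s->jabber_err +
	       s->l2_err + s->frame_err;
}

/* rx_dropped: per-queue drops plus link-level and firmware drop counters. */
static uint64_t rx_dropped_total(uint64_t queue_drops,
				 const struct rx_link_stats *s)
{
	return queue_drops + s->fifo_err + s->dmac_drop + s->red_drops +
	       s->fw_err_pko + s->fw_err_link + s->fw_err_drop;
}

int main(void)
{
	struct rx_link_stats s = { .fcs_err = 3, .l2_err = 1, .dmac_drop = 7 };

	printf("rx_errors=%llu rx_dropped=%llu\n",
	       (unsigned long long)rx_errors_total(10, &s),
	       (unsigned long long)rx_dropped_total(2, &s));
	return 0;
}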
@@ -1124,6 +1514,10 @@ lio_get_ethtool_stats(struct net_device *netdev, */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); + /* Multicast packets sent by this port */ + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + /* mac tx statistics */ /*CVMX_BGXX_CMRX_TX_STAT5 */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); @@ -1160,6 +1554,9 @@ lio_get_ethtool_stats(struct net_device *netdev, *fw_total_fwd */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /* Multicast packets received on this port */ + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ @@ -1328,7 +1725,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, __attribute__((unused)), u64 *data) { - struct net_device_stats *netstats = &netdev->stats; + struct rtnl_link_stats64 lstats; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; int i = 0, j, vj; @@ -1336,25 +1733,31 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return; - netdev->netdev_ops->ndo_get_stats(netdev); + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ - data[i++] = CVM_CAST64(netstats->rx_packets); + data[i++] = lstats.rx_packets; /* sum of oct->instr_queue[iq_no]->stats.tx_done */ - data[i++] = CVM_CAST64(netstats->tx_packets); + data[i++] = lstats.tx_packets; /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ - data[i++] = CVM_CAST64(netstats->rx_bytes); + data[i++] = lstats.rx_bytes; /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ - data[i++] = CVM_CAST64(netstats->tx_bytes); - data[i++] = CVM_CAST64(netstats->rx_errors); - data[i++] = CVM_CAST64(netstats->tx_errors); + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors; + data[i++] = lstats.tx_errors; /* sum of oct->droq[oq_no]->stats->rx_dropped + * oct->droq[oq_no]->stats->dropped_nodispatch + * oct->droq[oq_no]->stats->dropped_toomany + * oct->droq[oq_no]->stats->dropped_nomem */ - data[i++] = CVM_CAST64(netstats->rx_dropped); + data[i++] = lstats.rx_dropped; /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = CVM_CAST64(netstats->tx_dropped); + data[i++] = lstats.tx_dropped; + + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + /* lio->link_changes */ data[i++] = CVM_CAST64(lio->link_changes); @@ -1765,161 +2168,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio, return -EINTR; } -static void -octnet_nic_stats_callback(struct octeon_device *oct_dev, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct oct_nic_stats_resp *resp = - (struct oct_nic_stats_resp *)sc->virtrptr; - struct oct_nic_stats_ctrl *ctrl = - (struct oct_nic_stats_ctrl *)sc->ctxptr; - struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; - struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; - - struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; - struct nic_tx_stats 
*tstats = &oct_dev->link_stats.fromhost; - - if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) { - octeon_swap_8B_data((u64 *)&resp->stats, - (sizeof(struct oct_link_stats)) >> 3); - - /* RX link-level stats */ - rstats->total_rcvd = rsp_rstats->total_rcvd; - rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; - rstats->total_bcst = rsp_rstats->total_bcst; - rstats->total_mcst = rsp_rstats->total_mcst; - rstats->runts = rsp_rstats->runts; - rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; - /* Accounts for over/under-run of buffers */ - rstats->fifo_err = rsp_rstats->fifo_err; - rstats->dmac_drop = rsp_rstats->dmac_drop; - rstats->fcs_err = rsp_rstats->fcs_err; - rstats->jabber_err = rsp_rstats->jabber_err; - rstats->l2_err = rsp_rstats->l2_err; - rstats->frame_err = rsp_rstats->frame_err; - - /* RX firmware stats */ - rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; - rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; - rstats->fw_err_pko = rsp_rstats->fw_err_pko; - rstats->fw_err_link = rsp_rstats->fw_err_link; - rstats->fw_err_drop = rsp_rstats->fw_err_drop; - rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; - rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; - - /* Number of packets that are LROed */ - rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; - /* Number of octets that are LROed */ - rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; - /* Number of LRO packets formed */ - rstats->fw_total_lro = rsp_rstats->fw_total_lro; - /* Number of times lRO of packet aborted */ - rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; - rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; - rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; - rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; - rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; - /* intrmod: packet forward rate */ - rstats->fwd_rate = rsp_rstats->fwd_rate; - - /* TX link-level stats */ - tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; - tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; - tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; - tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; - tstats->ctl_sent = rsp_tstats->ctl_sent; - /* Packets sent after one collision*/ - tstats->one_collision_sent = rsp_tstats->one_collision_sent; - /* Packets sent after multiple collision*/ - tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; - /* Packets not sent due to max collisions */ - tstats->max_collision_fail = rsp_tstats->max_collision_fail; - /* Packets not sent due to max deferrals */ - tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; - /* Accounts for over/under-run of buffers */ - tstats->fifo_err = rsp_tstats->fifo_err; - tstats->runts = rsp_tstats->runts; - /* Total number of collisions detected */ - tstats->total_collisions = rsp_tstats->total_collisions; - - /* firmware stats */ - tstats->fw_total_sent = rsp_tstats->fw_total_sent; - tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; - tstats->fw_err_pko = rsp_tstats->fw_err_pko; - tstats->fw_err_pki = rsp_tstats->fw_err_pki; - tstats->fw_err_link = rsp_tstats->fw_err_link; - tstats->fw_err_drop = rsp_tstats->fw_err_drop; - tstats->fw_tso = rsp_tstats->fw_tso; - tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; - tstats->fw_err_tso = rsp_tstats->fw_err_tso; - tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; - - resp->status = 1; - } else { - resp->status = -1; - } - complete(&ctrl->complete); -} - -/* Configure interrupt moderation parameters */ -static int octnet_get_link_stats(struct net_device *netdev) -{ - 
struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct_dev = lio->oct_dev; - - struct octeon_soft_command *sc; - struct oct_nic_stats_ctrl *ctrl; - struct oct_nic_stats_resp *resp; - - int retval; - - /* Alloc soft command */ - sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct_dev, - 0, - sizeof(struct oct_nic_stats_resp), - sizeof(struct octnic_ctrl_pkt)); - - if (!sc) - return -ENOMEM; - - resp = (struct oct_nic_stats_resp *)sc->virtrptr; - memset(resp, 0, sizeof(struct oct_nic_stats_resp)); - - ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; - memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); - ctrl->netdev = netdev; - init_completion(&ctrl->complete); - - sc->iq_no = lio->linfo.txpciq[0].s.q_no; - - octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, - OPCODE_NIC_PORT_STATS, 0, 0, 0); - - sc->callback = octnet_nic_stats_callback; - sc->callback_arg = sc; - sc->wait_time = 500; /*in milli seconds*/ - - retval = octeon_send_soft_command(oct_dev, sc); - if (retval == IQ_SEND_FAILED) { - octeon_free_soft_command(oct_dev, sc); - return -EINVAL; - } - - wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); - - if (resp->status != 1) { - octeon_free_soft_command(oct_dev, sc); - - return -EINVAL; - } - - octeon_free_soft_command(oct_dev, sc); - - return 0; -} - static int lio_get_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { @@ -2864,6 +3112,7 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) static const struct ethtool_ops lio_ethtool_ops = { .get_link_ksettings = lio_get_link_ksettings, + .set_link_ksettings = lio_set_link_ksettings, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 603a144d3d9c..8a815bb57177 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -138,33 +138,10 @@ union tx_info { * by this structure in the NIC module. */ -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -/** Structure of a node in list of gather components maintained by - * NIC driver for each network device. - */ -struct octnic_gather { - /** List manipulation. Next and prev pointers. */ - struct list_head list; - - /** Size of the gather component at sg in bytes. */ - int sg_size; - - /** Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /** Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - struct handshake { struct completion init; struct completion started; @@ -520,7 +497,7 @@ static void liquidio_deinit_pci(void) */ static inline int check_txq_status(struct lio *lio) { - int numqs = lio->netdev->num_tx_queues; + int numqs = lio->netdev->real_num_tx_queues; int ret_val = 0; int q, iq; @@ -542,148 +519,6 @@ static inline int check_txq_status(struct lio *lio) } /** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. 
- */ -static inline struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); - if (g) - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) -{ - int i, j; - struct octnic_gather *g; - - lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), - GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), - GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - int numa_node = dev_to_node(&oct->pci_dev->dev); - - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(oct, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc_node(sizeof(*g), GFP_KERNEL, - numa_node); - if (!g) - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - -/** * \brief Print link information * @param netdev network device */ @@ -1077,6 +912,9 @@ liquidio_probe(struct pci_dev *pdev, /* set linux specific device pointer */ oct_dev->pci_dev = (void *)pdev; + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + hs = &handshake[oct_dev->octeon_id]; init_completion(&hs->init); init_completion(&hs->started); @@ -1471,7 +1309,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_rx_oom_poll_fn(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); @@ -1686,7 +1524,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = 
skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); spin_unlock(&lio->glist_lock[iq]); @@ -1729,7 +1567,7 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1928,7 +1766,7 @@ static int load_firmware(struct octeon_device *oct) ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); if (ret) { - dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", + dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n", fw_name); release_firmware(fw); return ret; @@ -1942,39 +1780,6 @@ static int load_firmware(struct octeon_device *oct) } /** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_resp *resp; - struct liquidio_if_cfg_context *ctx; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n", - CVM_CAST64(resp->status), status); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Poll routine for checking transmit queue status * @param work work_struct data structure */ @@ -2049,11 +1854,6 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - /* Ready for link status updates */ - lio->intf_open = 1; - - netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - if (OCTEON_CN23XX_PF(oct)) { if (!oct->msix_on) if (setup_tx_poll_fn(netdev)) @@ -2063,7 +1863,12 @@ static int liquidio_open(struct net_device *netdev) return -1; } - start_txqs(netdev); + netif_tx_start_all_queues(netdev); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2086,11 +1891,15 @@ static int liquidio_stop(struct net_device *netdev) ifstate_reset(lio, LIO_IFSTATE_RUNNING); - netif_tx_disable(netdev); + /* Stop any link updates */ + lio->intf_open = 0; + + stop_txqs(netdev); /* Inform that netif carrier is down */ netif_carrier_off(netdev); - lio->intf_open = 0; + netif_tx_disable(netdev); + lio->linfo.link.s.link_up = 0; lio->link_changes++; @@ -2252,14 +2061,11 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return 0; } -/** - * \brief Net device get_stats - * @param netdev network device - */ -static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) { struct lio *lio = GET_LIO(netdev); - struct net_device_stats *stats = &netdev->stats; struct octeon_device *oct; u64 pkts = 0, drop = 0, bytes = 0; struct oct_droq_stats *oq_stats; @@ -2269,7 
+2075,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) - return stats; + return; for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; @@ -2279,9 +2085,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += iq_stats->tx_tot_bytes; } - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = drop; + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; pkts = 0; drop = 0; @@ -2298,11 +2104,34 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += oq_stats->rx_bytes_received; } - stats->rx_bytes = bytes; - stats->rx_packets = pkts; - stats->rx_dropped = drop; - - return stats; + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + octnet_get_link_stats(netdev); + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + lstats->collisions = oct->link_stats.fromhost.total_collisions; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; + /* recv'r fifo overrun */ + lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; + + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors + lstats->rx_fifo_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; + lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; + + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors + + lstats->tx_fifo_errors; } /** @@ -2510,7 +2339,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(oct, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -2585,6 +2414,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", __func__); + stats->tx_dmamap_fail++; return NETDEV_TX_BUSY; } @@ -2602,7 +2432,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) spin_lock(&lio->glist_lock[q_idx]); g = (struct octnic_gather *) - list_delete_head(&lio->glist[q_idx]); + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { @@ -2624,6 +2454,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", __func__); + stats->tx_dmamap_fail++; return NETDEV_TX_BUSY; } add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); @@ -3324,11 +3155,36 @@ static const struct switchdev_ops lio_pf_switchdev_ops = { .switchdev_port_attr_get = lio_pf_switchdev_attr_get, }; +static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, + struct ifla_vf_stats *vf_stats) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_vf_stats stats; + int ret; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + memset(&stats, 0, sizeof(struct 
oct_vf_stats)); + ret = cn23xx_get_vf_stats(oct, vfidx, &stats); + if (!ret) { + vf_stats->rx_packets = stats.rx_packets; + vf_stats->tx_packets = stats.tx_packets; + vf_stats->rx_bytes = stats.rx_bytes; + vf_stats->tx_bytes = stats.tx_bytes; + vf_stats->broadcast = stats.broadcast; + vf_stats->multicast = stats.multicast; + } + + return ret; +} + static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, - .ndo_get_stats = liquidio_get_stats, + .ndo_get_stats64 = liquidio_get_stats64, .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, @@ -3346,6 +3202,7 @@ static const struct net_device_ops lionetdevops = { .ndo_get_vf_config = liquidio_get_vf_config, .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, + .ndo_get_vf_stats = liquidio_get_vf_stats, }; /** \brief Entry point for the liquidio module @@ -3448,6 +3305,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; + int max_num_queues = 0; union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; @@ -3528,9 +3386,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; - sc->wait_time = 3000; + sc->wait_time = LIO_IFCFG_WAIT_TIME; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -3584,11 +3442,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) resp->cfg_info.oqmask); goto setup_nic_dev_fail; } + + if (OCTEON_CN6XXX(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn6xxx)); + } else if (OCTEON_CN23XX_PF(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn23xx_pf)); + } + dev_dbg(&octeon_dev->pci_dev->dev, - "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, - num_iqueues, num_oqueues); - netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + num_iqueues, num_oqueues, max_num_queues); + netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); @@ -3603,6 +3470,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->netdev_ops = &lionetdevops; SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); + retval = netif_set_real_num_rx_queues(netdev, num_oqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number rx failed\n"); + goto setup_nic_dev_fail; + } + + retval = netif_set_real_num_tx_queues(netdev, num_iqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number tx failed\n"); + goto setup_nic_dev_fail; + } + lio = GET_LIO(netdev); memset(lio, 0, sizeof(struct lio)); @@ -3724,7 +3605,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(octeon_dev, lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto 
setup_nic_dev_fail; @@ -3786,6 +3667,23 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "NIC ifidx:%d Setup successful\n", i); octeon_free_soft_command(octeon_dev, sc); + + if (octeon_dev->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + octeon_dev->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + liquidio_get_speed(lio); + + if (octeon_dev->speed_setting == 0) { + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } + } else { + octeon_dev->no_speed_setting = 1; + octeon_dev->speed_setting = 10; + } + octeon_dev->speed_boot = octeon_dev->speed_setting; + } devlink = devlink_alloc(&liquidio_devlink_ops, @@ -4223,7 +4121,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) } atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(octeon_dev)) { + if (octeon_allocate_ioq_vector + (octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) { dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index f92dfa411de6..7fa0212873ac 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -69,30 +69,10 @@ union tx_info { } s; }; -#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) - #define OCTNIC_GSO_MAX_HEADER_SIZE 128 #define OCTNIC_GSO_MAX_SIZE \ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) -struct octnic_gather { - /* List manipulation. Next and prev pointers. */ - struct list_head list; - - /* Size of the gather component at sg in bytes. */ - int sg_size; - - /* Number of bytes that sg was adjusted to make it 8B-aligned. */ - int adjust; - - /* Gather component that can accommodate max sized fragment list - * received from the IP layer. - */ - struct octeon_sg_entry *sg; - - dma_addr_t sg_dma_ptr; -}; - static int liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void liquidio_vf_remove(struct pci_dev *pdev); @@ -285,142 +265,6 @@ static struct pci_driver liquidio_vf_pci_driver = { }; /** - * Remove the node at the head of the list. The list would be empty at - * the end of this call if there are no more nodes in the list. 
- */ -static struct list_head *list_delete_head(struct list_head *root) -{ - struct list_head *node; - - if ((root->prev == root) && (root->next == root)) - node = NULL; - else - node = root->next; - - if (node) - list_del(node); - - return node; -} - -/** - * \brief Delete gather lists - * @param lio per-network private data - */ -static void delete_glists(struct lio *lio) -{ - struct octnic_gather *g; - int i; - - kfree(lio->glist_lock); - lio->glist_lock = NULL; - - if (!lio->glist) - return; - - for (i = 0; i < lio->linfo.num_txpciq; i++) { - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist[i]); - kfree(g); - } while (g); - - if (lio->glists_virt_base && lio->glists_virt_base[i] && - lio->glists_dma_base && lio->glists_dma_base[i]) { - lio_dma_free(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - lio->glists_virt_base[i], - lio->glists_dma_base[i]); - } - } - - kfree(lio->glists_virt_base); - lio->glists_virt_base = NULL; - - kfree(lio->glists_dma_base); - lio->glists_dma_base = NULL; - - kfree(lio->glist); - lio->glist = NULL; -} - -/** - * \brief Setup gather lists - * @param lio per-network private data - */ -static int setup_glists(struct lio *lio, int num_iqs) -{ - struct octnic_gather *g; - int i, j; - - lio->glist_lock = - kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL); - if (!lio->glist_lock) - return -ENOMEM; - - lio->glist = - kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL); - if (!lio->glist) { - kfree(lio->glist_lock); - lio->glist_lock = NULL; - return -ENOMEM; - } - - lio->glist_entry_size = - ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); - - /* allocate memory to store virtual and dma base address of - * per glist consistent memory - */ - lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), - GFP_KERNEL); - lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), - GFP_KERNEL); - - if (!lio->glists_virt_base || !lio->glists_dma_base) { - delete_glists(lio); - return -ENOMEM; - } - - for (i = 0; i < num_iqs; i++) { - spin_lock_init(&lio->glist_lock[i]); - - INIT_LIST_HEAD(&lio->glist[i]); - - lio->glists_virt_base[i] = - lio_dma_alloc(lio->oct_dev, - lio->glist_entry_size * lio->tx_qsize, - &lio->glists_dma_base[i]); - - if (!lio->glists_virt_base[i]) { - delete_glists(lio); - return -ENOMEM; - } - - for (j = 0; j < lio->tx_qsize; j++) { - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; - - g->sg = lio->glists_virt_base[i] + - (j * lio->glist_entry_size); - - g->sg_dma_ptr = lio->glists_dma_base[i] + - (j * lio->glist_entry_size); - - list_add_tail(&g->list, &lio->glist[i]); - } - - if (j != lio->tx_qsize) { - delete_glists(lio); - return -ENOMEM; - } - } - - return 0; -} - -/** * \brief Print link information * @param netdev network device */ @@ -567,6 +411,9 @@ liquidio_vf_probe(struct pci_dev *pdev, /* set linux specific device pointer */ oct_dev->pci_dev = pdev; + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + if (octeon_device_init(oct_dev)) { liquidio_vf_remove(pdev); return -ENOMEM; @@ -856,7 +703,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) cleanup_link_status_change_wq(netdev); - delete_glists(lio); + lio_delete_glists(lio); free_netdev(netdev); @@ -1005,7 +852,7 @@ static void free_netsgbuf(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1049,7 +896,7 @@ static void 
free_netsgbuf_with_resp(void *buf) i++; } - iq = skb_iq(lio, skb); + iq = skb_iq(lio->oct_dev, skb); spin_lock(&lio->glist_lock[iq]); list_add_tail(&g->list, &lio->glist[iq]); @@ -1059,38 +906,6 @@ static void free_netsgbuf_with_resp(void *buf) } /** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -static void if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Net device open for LiquidIO * @param netdev network device */ @@ -1336,24 +1151,21 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) return 0; } -/** - * \brief Net device get_stats - * @param netdev network device - */ -static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) { struct lio *lio = GET_LIO(netdev); - struct net_device_stats *stats = &netdev->stats; + struct octeon_device *oct; u64 pkts = 0, drop = 0, bytes = 0; struct oct_droq_stats *oq_stats; struct oct_iq_stats *iq_stats; - struct octeon_device *oct; int i, iq_no, oq_no; oct = lio->oct_dev; if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) - return stats; + return; for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; @@ -1363,9 +1175,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += iq_stats->tx_tot_bytes; } - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = drop; + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; pkts = 0; drop = 0; @@ -1382,11 +1194,29 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes += oq_stats->rx_bytes_received; } - stats->rx_bytes = bytes; - stats->rx_packets = pkts; - stats->rx_dropped = drop; + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + octnet_get_link_stats(netdev); + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; - return stats; + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; + + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors; } /** @@ -1580,7 +1410,7 @@ static int liquidio_xmit(struct sk_buff *skb, 
struct net_device *netdev) lio = GET_LIO(netdev); oct = lio->oct_dev; - q_idx = skb_iq(lio, skb); + q_idx = skb_iq(lio->oct_dev, skb); tag = q_idx; iq_no = lio->linfo.txpciq[q_idx].s.q_no; @@ -1661,8 +1491,8 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) int i, frags; spin_lock(&lio->glist_lock[q_idx]); - g = (struct octnic_gather *)list_delete_head( - &lio->glist[q_idx]); + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[q_idx]); spin_unlock(&lio->glist_lock[q_idx]); if (!g) { @@ -2034,7 +1864,7 @@ static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, .ndo_start_xmit = liquidio_xmit, - .ndo_get_stats = liquidio_get_stats, + .ndo_get_stats64 = liquidio_get_stats64, .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, @@ -2156,7 +1986,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = if_cfg_callback; + sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 5000; @@ -2273,6 +2103,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->features = (lio->dev_capability & ~NETIF_F_LRO); netdev->hw_features = lio->dev_capability; + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; /* MTU range: 68 - 16000 */ netdev->min_mtu = LIO_MIN_MTU_SIZE; @@ -2321,7 +2152,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glists(lio, num_iqueues)) { + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; @@ -2371,6 +2202,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "NIC ifidx:%d Setup successful\n", i); octeon_free_soft_command(octeon_dev, sc); + + octeon_dev->no_speed_setting = 1; } return 0; @@ -2512,7 +2345,7 @@ static int octeon_device_init(struct octeon_device *oct) } atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); - if (octeon_allocate_ioq_vector(oct)) { + if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); return 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 2adafa366d3f..ddd7431579f4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -201,13 +201,14 @@ lio_vf_rep_get_stats64(struct net_device *dev, { struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); - stats64->tx_packets = vf_rep->stats.tx_packets; - stats64->tx_bytes = vf_rep->stats.tx_bytes; - stats64->tx_dropped = vf_rep->stats.tx_dropped; - - stats64->rx_packets = vf_rep->stats.rx_packets; - stats64->rx_bytes = vf_rep->stats.rx_bytes; - stats64->rx_dropped = vf_rep->stats.rx_dropped; + /* Swap tx and rx stats as VF rep is a switch port */ + stats64->tx_packets = vf_rep->stats.rx_packets; + stats64->tx_bytes = vf_rep->stats.rx_bytes; + stats64->tx_dropped = vf_rep->stats.rx_dropped; + + stats64->rx_packets = vf_rep->stats.tx_packets; + stats64->rx_bytes = vf_rep->stats.tx_bytes; + stats64->rx_dropped = vf_rep->stats.tx_dropped; } static int diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 
75eea83c7cc6..690424b6781a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -28,7 +28,7 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 #define LIQUIDIO_BASE_MINOR_VERSION 7 -#define LIQUIDIO_BASE_MICRO_VERSION 0 +#define LIQUIDIO_BASE_MICRO_VERSION 2 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION) @@ -84,6 +84,7 @@ enum octeon_tag_type { #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A #define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_QCOUNT_UPDATE 0x12 #define OPCODE_NIC_SET_TRUSTED_VF 0x13 #define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 #define VF_DRV_LOADED 1 @@ -92,6 +93,7 @@ enum octeon_tag_type { #define OPCODE_NIC_VF_REP_PKT 0x15 #define OPCODE_NIC_VF_REP_CMD 0x16 +#define OPCODE_NIC_UBOOT_CTL 0x17 #define CORE_DRV_TEST_SCATTER_OP 0xFFF5 @@ -248,6 +250,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define SEAPI_CMD_SPEED_SET 0x2 +#define SEAPI_CMD_SPEED_GET 0x3 + #define LIO_CMD_WAIT_TM 100 /* RX(packets coming from wire) Checksum verification flags */ @@ -779,23 +784,32 @@ struct liquidio_if_cfg_info { /** Stats for each NIC port in RX direction. */ struct nic_rx_stats { /* link-level stats */ - u64 total_rcvd; - u64 bytes_rcvd; - u64 total_bcst; - u64 total_mcst; - u64 runts; - u64 ctl_rcvd; - u64 fifo_err; /* Accounts for over/under-run of buffers */ - u64 dmac_drop; - u64 fcs_err; - u64 jabber_err; - u64 l2_err; - u64 frame_err; + u64 total_rcvd; /* Received packets */ + u64 bytes_rcvd; /* Octets of received packets */ + u64 total_bcst; /* Number of non-dropped L2 broadcast packets */ + u64 total_mcst; /* Number of non-dropped L2 multicast packets */ + u64 runts; /* Packets shorter than allowed */ + u64 ctl_rcvd; /* Received PAUSE packets */ + u64 fifo_err; /* Packets dropped due to RX FIFO full */ + u64 dmac_drop; /* Packets dropped by the DMAC filter */ + u64 fcs_err; /* Sum of fragment, overrun, and FCS errors */ + u64 jabber_err; /* Packets larger than allowed */ + u64 l2_err; /* Sum of DMA, parity, PCAM access, no memory, + * buffer overflow, malformed L2 header or + * length, oversize errors + **/ + u64 frame_err; /* Sum of IPv4 and L4 checksum errors */ + u64 red_drops; /* Packets dropped by RED due to buffer + * exhaustion + **/ /* firmware stats */ u64 fw_total_rcvd; u64 fw_total_fwd; u64 fw_total_fwd_bytes; + u64 fw_total_mcast; + u64 fw_total_bcast; + u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; @@ -806,11 +820,11 @@ struct nic_rx_stats { u64 fw_lro_pkts; /* Number of packets that are LROed */ u64 fw_lro_octs; /* Number of octets that are LROed */ u64 fw_total_lro; /* Number of LRO packets formed */ - u64 fw_lro_aborts; /* Number of times lRO of packet aborted */ + u64 fw_lro_aborts; /* Number of times LRO of packet aborted */ u64 fw_lro_aborts_port; u64 fw_lro_aborts_seq; u64 fw_lro_aborts_tsval; - u64 fw_lro_aborts_timer; + u64 fw_lro_aborts_timer; /* Timer setting error */ /* intrmod: packet forward rate */ u64 fwd_rate; }; @@ -818,23 +832,42 @@ struct nic_rx_stats { /** Stats for each NIC port in RX direction. 
*/ struct nic_tx_stats { /* link-level stats */ - u64 total_pkts_sent; - u64 total_bytes_sent; - u64 mcast_pkts_sent; - u64 bcast_pkts_sent; - u64 ctl_sent; - u64 one_collision_sent; /* Packets sent after one collision*/ - u64 multi_collision_sent; /* Packets sent after multiple collision*/ - u64 max_collision_fail; /* Packets not sent due to max collisions */ - u64 max_deferral_fail; /* Packets not sent due to max deferrals */ - u64 fifo_err; /* Accounts for over/under-run of buffers */ - u64 runts; - u64 total_collisions; /* Total number of collisions detected */ + u64 total_pkts_sent; /* Total frames sent on the interface */ + u64 total_bytes_sent; /* Total octets sent on the interface */ + u64 mcast_pkts_sent; /* Packets sent to the multicast DMAC */ + u64 bcast_pkts_sent; /* Packets sent to a broadcast DMAC */ + u64 ctl_sent; /* Control/PAUSE packets sent */ + u64 one_collision_sent; /* Packets sent that experienced a + * single collision before successful + * transmission + **/ + u64 multi_collision_sent; /* Packets sent that experienced + * multiple collisions before successful + * transmission + **/ + u64 max_collision_fail; /* Packets dropped due to excessive + * collisions + **/ + u64 max_deferral_fail; /* Packets not sent due to max + * deferrals + **/ + u64 fifo_err; /* Packets sent that experienced a + * transmit underflow and were + * truncated + **/ + u64 runts; /* Packets sent with an octet count + * lessthan 64 + **/ + u64 total_collisions; /* Packets dropped due to excessive + * collisions + **/ /* firmware stats */ u64 fw_total_sent; u64 fw_total_fwd; u64 fw_total_fwd_bytes; + u64 fw_total_mcast_sent; + u64 fw_total_bcast_sent; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f38abf626412..f878a552fef3 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -824,23 +824,18 @@ int octeon_deregister_device(struct octeon_device *oct) } int -octeon_allocate_ioq_vector(struct octeon_device *oct) +octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs) { - int i, num_ioqs = 0; struct octeon_ioq_vector *ioq_vector; int cpu_num; int size; - - if (OCTEON_CN23XX_PF(oct)) - num_ioqs = oct->sriov_info.num_pf_rings; - else if (OCTEON_CN23XX_VF(oct)) - num_ioqs = oct->sriov_info.rings_per_vf; + int i; size = sizeof(struct octeon_ioq_vector) * num_ioqs; oct->ioq_vector = vzalloc(size); if (!oct->ioq_vector) - return 1; + return -1; for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; @@ -856,6 +851,7 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) else ioq_vector->ioq_num = i; } + return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 91937cc5c1d7..94a4ed88d618 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -43,6 +43,13 @@ #define OCTEON_CN23XX_REV_1_1 0x01 #define OCTEON_CN23XX_REV_2_0 0x80 +/**SubsystemId for the chips */ +#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d +#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d +#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d +#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d +#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d + /** Endian-swap modes supported by Octeon. 
*/ enum octeon_pci_swap_mode { OCTEON_PCI_PASSTHROUGH = 0, @@ -430,6 +437,8 @@ struct octeon_device { u16 rev_id; + u32 subsystem_id; + u16 pf_num; u16 vf_num; @@ -584,6 +593,11 @@ struct octeon_device { struct lio_vf_rep_list vf_rep_list; struct devlink *devlink; enum devlink_eswitch_mode eswitch_mode; + + /* for 25G NIC speed change */ + u8 speed_boot; + u8 speed_setting; + u8 no_speed_setting; }; #define OCT_DRV_ONLINE 1 @@ -867,7 +881,7 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type); struct octeon_config *octeon_get_conf(struct octeon_device *oct); void octeon_free_ioq_vector(struct octeon_device *oct); -int octeon_allocate_ioq_vector(struct octeon_device *oct); +int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs); void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq); /* LiquidIO driver pivate flags */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 81c987682941..5fed7b63223e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -62,8 +62,8 @@ struct oct_iq_stats { u64 tx_tot_bytes;/**< Total count of bytes sento to network. */ u64 tx_gso; /* count of tso */ u64 tx_vxlan; /* tunnel */ - u64 tx_dmamap_fail; - u64 tx_restart; + u64 tx_dmamap_fail; /* Number of times dma mapping failed */ + u64 tx_restart; /* Number of times this queue restarted */ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c index 28e74ee23ff8..021d99cd1665 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -24,6 +24,7 @@ #include "octeon_device.h" #include "octeon_main.h" #include "octeon_mailbox.h" +#include "cn23xx_pf_device.h" /** * octeon_mbox_read: @@ -205,6 +206,26 @@ int octeon_mbox_write(struct octeon_device *oct, return ret; } +static void get_vf_stats(struct octeon_device *oct, + struct oct_vf_stats *stats) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + if (!oct->instr_queue[i]) + continue; + stats->tx_packets += oct->instr_queue[i]->stats.tx_done; + stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes; + } + + for (i = 0; i < oct->num_oqs; i++) { + if (!oct->droq[i]) + continue; + stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; + stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received; + } +} + /** * octeon_mbox_process_cmd: * @mbox: Pointer mailbox @@ -250,6 +271,15 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox, mbox_cmd->msg.s.params); break; + case OCTEON_GET_VF_STATS: + dev_dbg(&oct->pci_dev->dev, "Got VF stats request. 
Sending data back\n"); + mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE; + mbox_cmd->msg.s.resp_needed = 1; + mbox_cmd->msg.s.len = 1 + + sizeof(struct oct_vf_stats) / sizeof(u64); + get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data); + octeon_mbox_write(oct, mbox_cmd); + break; default: break; } @@ -322,3 +352,25 @@ int octeon_mbox_process_message(struct octeon_mbox *mbox) return 0; } + +int octeon_mbox_cancel(struct octeon_device *oct, int q_no) +{ + struct octeon_mbox *mbox = oct->mbox[q_no]; + struct octeon_mbox_cmd *mbox_cmd; + unsigned long flags = 0; + + spin_lock_irqsave(&mbox->lock, flags); + mbox_cmd = &mbox->mbox_resp; + + if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) { + spin_unlock_irqrestore(&mbox->lock, flags); + return 1; + } + + mbox->state = OCTEON_MBOX_STATE_IDLE; + memset(mbox_cmd, 0, sizeof(*mbox_cmd)); + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h index 1def22afeff1..d92bd7e16477 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -25,6 +25,7 @@ #define OCTEON_VF_ACTIVE 0x1 #define OCTEON_VF_FLR_REQUEST 0x2 #define OCTEON_PF_CHANGED_VF_MACADDR 0x4 +#define OCTEON_GET_VF_STATS 0x8 /*Macro for Read acknowldgement*/ #define OCTEON_PFVFACK 0xffffffffffffffffULL @@ -107,9 +108,15 @@ struct octeon_mbox { }; +struct oct_vf_stats_ctx { + atomic_t status; + struct oct_vf_stats *stats; +}; + int octeon_mbox_read(struct octeon_mbox *mbox); int octeon_mbox_write(struct octeon_device *oct, struct octeon_mbox_cmd *mbox_cmd); int octeon_mbox_process_message(struct octeon_mbox *mbox); +int octeon_mbox_cancel(struct octeon_device *oct, int q_no); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 4069710796a8..d7a3916fe877 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -47,6 +47,29 @@ struct liquidio_if_cfg_resp { u64 status; }; +#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ + +/* Structure of a node in list of gather components maintained by + * NIC driver for each network device. + */ +struct octnic_gather { + /* List manipulation. Next and prev pointers. */ + struct list_head list; + + /* Size of the gather component at sg in bytes. */ + int sg_size; + + /* Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /* Gather component that can accommodate max sized fragment list + * received from the IP layer. + */ + struct octeon_sg_entry *sg; + + dma_addr_t sg_dma_ptr; +}; + struct oct_nic_stats_resp { u64 rh; struct oct_link_stats stats; @@ -58,6 +81,18 @@ struct oct_nic_stats_ctrl { struct net_device *netdev; }; +struct oct_nic_seapi_resp { + u64 rh; + u32 speed; + u64 status; +}; + +struct liquidio_nic_seapi_ctl_context { + int octeon_id; + u32 status; + struct completion complete; +}; + /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. 
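The reply sizing in the OCTEON_GET_VF_STATS case above is counted in 64-bit words: one for the leading mailbox message word plus one per u64 counter in struct oct_vf_stats. A standalone sketch of the arithmetic, using a hypothetical four-counter mirror of the struct (the get_vf_stats() shown above fills exactly rx/tx packets and bytes; the real layout may carry more fields):

#include <stdio.h>
#include <stdint.h>

struct oct_vf_stats {	/* hypothetical mirror, u64 counters only */
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
	uint64_t tx_bytes;
};

int main(void)
{
	/* one word for the mailbox message itself, one per counter */
	size_t len = 1 + sizeof(struct oct_vf_stats) / sizeof(uint64_t);

	printf("msg.s.len = %zu words\n", len);	/* 5 for this layout */
	return 0;
}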
*/ @@ -157,7 +192,7 @@ struct lio { #define LIO_SIZE (sizeof(struct lio)) #define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev)) -#define LIO_MAX_CORES 12 +#define LIO_MAX_CORES 16 /** * \brief Enable or disable feature @@ -190,6 +225,8 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); +int octnet_get_link_stats(struct net_device *netdev); + int lio_wait_for_clean_oq(struct octeon_device *oct); /** * \brief Register ethtool operations @@ -197,6 +234,17 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); +void lio_if_cfg_callback(struct octeon_device *oct, + u32 status __attribute__((unused)), + void *buf); + +void lio_delete_glists(struct lio *lio); + +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); + +int liquidio_get_speed(struct lio *lio); +int liquidio_set_speed(struct lio *lio, int speed); + /** * \brief Net device change_mtu * @param netdev network device @@ -515,7 +563,7 @@ static inline void stop_txqs(struct net_device *netdev) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_stop_subqueue(netdev, i); } @@ -528,7 +576,7 @@ static inline void wake_txqs(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); int i, qno; - for (i = 0; i < netdev->num_tx_queues; i++) { + for (i = 0; i < netdev->real_num_tx_queues; i++) { qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; if (__netif_subqueue_stopped(netdev, i)) { @@ -549,14 +597,33 @@ static inline void start_txqs(struct net_device *netdev) int i; if (lio->linfo.link.s.link_up) { - for (i = 0; i < netdev->num_tx_queues; i++) + for (i = 0; i < netdev->real_num_tx_queues; i++) netif_start_subqueue(netdev, i); } } -static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb) { - return skb->queue_mapping % lio->linfo.num_txpciq; + return skb->queue_mapping % oct->num_iqs; +} + +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. 
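Two related queue-mapping changes appear above: skb_iq() now folds the stack's queue_mapping onto the number of input queues the device actually owns (oct->num_iqs) rather than the interface's advertised txpciq count, and the stop/wake/start helpers iterate only up to real_num_tx_queues so subqueues beyond the active set are never touched. A standalone sketch of the folding:

#include <stdio.h>

/* mirror of the skb_iq() modulo fold, with plain integers */
static unsigned int skb_iq(unsigned int num_iqs, unsigned int queue_mapping)
{
	return queue_mapping % num_iqs;
}

int main(void)
{
	unsigned int num_iqs = 4;	/* e.g. a VF that owns 4 rings */
	unsigned int q;

	for (q = 0; q < 8; q++)
		printf("queue_mapping %u -> iq %u\n", q, skb_iq(num_iqs, q));
	return 0;
}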
+ */ +static inline struct list_head *lio_list_delete_head(struct list_head *root) +{ + struct list_head *node; + + if (root->prev == root && root->next == root) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; } #endif diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 707db3304396..7135db45927e 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -538,9 +538,9 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, action = bpf_prog_run_xdp(prog, &xdp); rcu_read_unlock(); + len = xdp.data_end - xdp.data; /* Check if XDP program has changed headers */ if (orig_data != xdp.data) { - len = xdp.data_end - xdp.data; offset = orig_data - xdp.data; dma_addr -= offset; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e988caa797cb..20b6e1b3f5e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -620,7 +620,7 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size; void *s = NULL; - void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(&pdev->dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -633,7 +633,6 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size, } *(void **)metadata = s; } - memset(p, 0, len); return p; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index dc25066c59a1..3c5057868ab3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -62,6 +62,18 @@ struct cudbg_hw_sched { u32 map; }; +#define SGE_QBASE_DATA_REG_NUM 4 + +struct sge_qbase_reg_field { + u32 reg_addr; + u32 reg_data[SGE_QBASE_DATA_REG_NUM]; + /* Max supported PFs */ + u32 pf_data_value[PCIE_FW_MASTER_M + 1][SGE_QBASE_DATA_REG_NUM]; + /* Max supported VFs */ + u32 vf_data_value[T6_VF_M + 1][SGE_QBASE_DATA_REG_NUM]; + u32 vfcount; /* Actual number of max vfs in current configuration */ +}; + struct ireg_field { u32 ireg_addr; u32 ireg_data; @@ -235,6 +247,9 @@ struct cudbg_vpd_data { }; #define CUDBG_MAX_TCAM_TID 0x800 +#define CUDBG_T6_CLIP 1536 +#define CUDBG_MAX_TID_COMP_EN 6144 +#define CUDBG_MAX_TID_COMP_DIS 3072 enum cudbg_le_entry_types { LE_ET_UNKNOWN = 0, @@ -354,6 +369,11 @@ static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { {0x10cc, 0x10d4, 0x0, 16}, }; +static const u32 t6_sge_qbase_index_array[] = { + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, +}; + static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 8568a51f6414..215fe6260fd7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -24,6 +24,7 @@ #define CUDBG_STATUS_NOT_IMPLEMENTED -28 #define CUDBG_SYSTEM_ERROR -29 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32 +#define CUDBG_STATUS_PARTIAL_DATA -41 #define CUDBG_MAJOR_VERSION 1 #define CUDBG_MINOR_VERSION 14 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c 
b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 9da6f57901a9..0afcfe99bff3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1339,16 +1339,39 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } +static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap, + struct sge_qbase_reg_field *qbase, + u32 func, bool is_pf) +{ + u32 *buff, i; + + if (is_pf) { + buff = qbase->pf_data_value[func]; + } else { + buff = qbase->vf_data_value[func]; + /* In SGE_QBASE_INDEX, + * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256. + */ + func += 8; + } + + t4_write_reg(padap, qbase->reg_addr, func); + for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++) + *buff = t4_read_reg(padap, qbase->reg_data[i]); +} + int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + struct sge_qbase_reg_field *sge_qbase; struct ireg_buf *ch_sge_dbg; int i, rc; - rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2, + rc = cudbg_get_buff(pdbg_init, dbg_buff, + sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase), &temp_buff); if (rc) return rc; @@ -1370,6 +1393,28 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, sge_pio->ireg_local_offset); ch_sge_dbg++; } + + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg; + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg + * SGE_QBASE_MAP[0-3] + */ + sge_qbase->reg_addr = t6_sge_qbase_index_array[0]; + for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++) + sge_qbase->reg_data[i] = + t6_sge_qbase_index_array[i + 1]; + + for (i = 0; i <= PCIE_FW_MASTER_M; i++) + cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, + i, true); + + for (i = 0; i < padap->params.arch.vfcount; i++) + cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase, + i, false); + + sge_qbase->vfcount = padap->params.arch.vfcount; + } + return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } @@ -2366,8 +2411,11 @@ void cudbg_fill_le_tcam_info(struct adapter *padap, value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); tcam_region->routing_start = value; - /*Get clip table index */ - value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); + /* Get clip table index. For T6 there is separate CLIP TCAM */ + if (is_t6(padap->params.chip)) + value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A); + else + value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); tcam_region->clip_start = value; /* Get filter table index */ @@ -2392,8 +2440,16 @@ void cudbg_fill_le_tcam_info(struct adapter *padap, tcam_region->tid_hash_base; } } else { /* hash not enabled */ - tcam_region->max_tid = CUDBG_MAX_TCAM_TID; + if (is_t6(padap->params.chip)) + tcam_region->max_tid = (value & ASLIPCOMPEN_F) ? 
+ CUDBG_MAX_TID_COMP_EN : + CUDBG_MAX_TID_COMP_DIS; + else + tcam_region->max_tid = CUDBG_MAX_TCAM_TID; } + + if (is_t6(padap->params.chip)) + tcam_region->max_tid += CUDBG_T6_CLIP; } int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, @@ -2423,18 +2479,31 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, for (i = 0; i < tcam_region.max_tid; ) { rc = cudbg_read_tid(pdbg_init, i, tid_data); if (rc) { - cudbg_err->sys_err = rc; - cudbg_put_buff(pdbg_init, &temp_buff); - return rc; + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + /* Update tcam header and exit */ + tcam_region.max_tid = i; + memcpy(temp_buff.data, &tcam_region, + sizeof(struct cudbg_tcam)); + goto out; } - /* ipv6 takes two tids */ - cudbg_is_ipv6_entry(tid_data, tcam_region) ? i += 2 : i++; + if (cudbg_is_ipv6_entry(tid_data, tcam_region)) { + /* T6 CLIP TCAM: ipv6 takes 4 entries */ + if (is_t6(padap->params.chip) && + i >= tcam_region.clip_start && + i < tcam_region.clip_start + CUDBG_T6_CLIP) + i += 4; + else /* Main TCAM: ipv6 takes two tids */ + i += 2; + } else { + i++; + } tid_data++; bytes += sizeof(struct cudbg_tid_data); } +out: return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 688f95440af2..0dbe2d9e22d6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -50,6 +50,7 @@ #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> #include <linux/ptp_classify.h> +#include <linux/crash_dump.h> #include <asm/io.h> #include "t4_chip_type.h" #include "cxgb4_uld.h" @@ -490,6 +491,9 @@ struct link_config { unsigned char link_ok; /* link up? */ unsigned char link_down_rc; /* link down reason */ + + bool new_module; /* ->OS Transceiver Module inserted */ + bool redo_l1cfg; /* ->CC redo current "sticky" L1 CFG */ }; #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) @@ -964,6 +968,9 @@ struct adapter { struct hma_data hma; struct srq_data *srq; + + /* Dump buffer for collecting logs in kdump kernel */ + struct vmcoredd_data vmcoredd; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be @@ -1034,6 +1041,7 @@ struct ch_sched_queue { #define VF_BITWIDTH 8 #define IVLAN_BITWIDTH 16 #define OVLAN_BITWIDTH 16 +#define ENCAP_VNI_BITWIDTH 24 /* Filter matching rules. These consist of a set of ingress packet field * (value, mask) tuples. The associated ingress packet field matches the @@ -1064,6 +1072,7 @@ struct ch_filter_tuple { uint32_t ivlan_vld:1; /* inner VLAN valid */ uint32_t ovlan_vld:1; /* outer VLAN valid */ uint32_t pfvf_vld:1; /* PF/VF valid */ + uint32_t encap_vld:1; /* Encapsulation valid */ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */ uint32_t iport:IPORT_BITWIDTH; /* ingress port */ @@ -1074,6 +1083,7 @@ struct ch_filter_tuple { uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */ + uint32_t vni:ENCAP_VNI_BITWIDTH; /* VNI of tunnel */ /* Uncompressed header matching field rules. These are always * available for field rules. 
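Worked numbers for the T6 LE TCAM walk set up in the cudbg_lib.c hunks above: the main region holds 6144 tids when the compression bit (ASLIPCOMPEN) is set and 3072 when it is not, and the separate T6 CLIP TCAM appends another 1536 entries, so cudbg_collect_le_tcam() walks 7680 or 4608 tids in total. Inside the CLIP range an IPv6 entry advances the index by 4, in the main TCAM by 2. A standalone sketch:

#include <stdio.h>

#define CUDBG_T6_CLIP		1536
#define CUDBG_MAX_TID_COMP_EN	6144
#define CUDBG_MAX_TID_COMP_DIS	3072

int main(void)
{
	int compression_enabled = 1;	/* ASLIPCOMPEN set in LE_DB_CONFIG */
	int max_tid = compression_enabled ? CUDBG_MAX_TID_COMP_EN :
					    CUDBG_MAX_TID_COMP_DIS;

	max_tid += CUDBG_T6_CLIP;	/* T6 keeps a separate CLIP TCAM */

	printf("tids to walk: %d\n", max_tid);	/* 7680; 4608 without
						 * compression
						 */
	return 0;
}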
@@ -1317,7 +1327,7 @@ static inline unsigned int qtimer_val(const struct adapter *adap, extern char cxgb4_driver_name[]; extern const char cxgb4_driver_version[]; -void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_portmod_changed(struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); void t4_free_sge_resources(struct adapter *adap); @@ -1498,8 +1508,25 @@ void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); int t4_wait_dev_ready(void __iomem *regs); -int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc); + +int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox, + unsigned int port, struct link_config *lc, + bool sleep_ok, int timeout); + +static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) +{ + return t4_link_l1cfg_core(adapter, mbox, port, lc, + true, FW_CMD_MAX_TIMEOUT); +} + +static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) +{ + return t4_link_l1cfg_core(adapter, mbox, port, lc, + false, FW_CMD_MAX_TIMEOUT); +} + int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); u32 t4_read_pcie_cfg4(struct adapter *adap, int reg); @@ -1690,6 +1717,12 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx, + bool sleep_ok); +int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int vni, + unsigned int vni_mask, u8 dip_hit, u8 lookup_type, + bool sleep_ok); int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); @@ -1705,6 +1738,9 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok); int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, + struct port_info *pi, + bool rx_en, bool tx_en, bool dcb_en); int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, bool rx_en, bool tx_en); int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 143686c60234..8d751efcb90e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -214,7 +214,8 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct ireg_buf) * n; break; case CUDBG_SGE_INDIRECT: - len = sizeof(struct ireg_buf) * 2; + len = sizeof(struct ireg_buf) * 2 + + sizeof(struct sge_qbase_reg_field); break; case CUDBG_ULPRX_LA: len = sizeof(struct cudbg_ulprx_la); @@ -488,3 +489,28 @@ void cxgb4_init_ethtool_dump(struct adapter *adapter) adapter->eth_dump.version = adapter->params.fw_vers; adapter->eth_dump.len = 0; } + +static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf) +{ + struct adapter *adap = 
container_of(data, struct adapter, vmcoredd); + u32 len = data->size; + + return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL); +} + +int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap) +{ + struct vmcoredd_data *data = &adap->vmcoredd; + u32 len; + + len = sizeof(struct cudbg_hdr) + + sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY; + len += CUDBG_DUMP_BUFF_SIZE; + + data->size = len; + snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s", + cxgb4_driver_name, adap->name); + data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect; + + return vmcore_add_device_dump(data); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index ce1ac9a1c878..ef59ba1ed968 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -41,8 +41,11 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ }; +#define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW) + u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag); int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, u32 flag); void cxgb4_init_ethtool_dump(struct adapter *adapter); +int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap); #endif /* __CXGB4_CUDBG_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 59d04d73c672..f7eef93ffc87 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -800,24 +800,20 @@ static int set_link_ksettings(struct net_device *dev, if (base->duplex != DUPLEX_FULL) return -EINVAL; - if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { - /* PHY offers a single speed. See if that's what's - * being requested. - */ - if (base->autoneg == AUTONEG_DISABLE && - (lc->pcaps & speed_to_fw_caps(base->speed))) - return 0; - return -EINVAL; - } - old_lc = *lc; - if (base->autoneg == AUTONEG_DISABLE) { + if (!(lc->pcaps & FW_PORT_CAP32_ANEG) || + base->autoneg == AUTONEG_DISABLE) { fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->pcaps & fw_caps)) + /* Must only specify a single speed which must be supported + * as part of the Physical Port Capabilities. 
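The single-speed rule described in the comment above is enforced, at the start of the next fragment, by the usual power-of-two test: a capability word with more than one bit set fails (fw_caps & (fw_caps - 1)) != 0. A standalone sketch with made-up capability bits (the real FW_PORT_CAP32_* values are not reproduced here):

#include <stdio.h>

/* true when more than one capability bit is set */
static int multiple_bits(unsigned int caps)
{
	return caps != 0 && (caps & (caps - 1)) != 0;
}

int main(void)
{
	printf("%d\n", multiple_bits(0x04));		/* 0: one speed, ok */
	printf("%d\n", multiple_bits(0x04 | 0x10));	/* 1: rejected */
	return 0;
}

(A zero fw_caps passes this particular test but is still rejected by the !(lc->pcaps & fw_caps) clause that follows it.)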
+ */ + if ((fw_caps & (fw_caps - 1)) != 0 || + !(lc->pcaps & fw_caps)) return -EINVAL; + lc->speed_caps = fw_caps; - lc->acaps = 0; + lc->acaps = fw_caps; } else { fw_caps = lmm_to_fw_caps(link_ksettings->link_modes.advertising); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index b76447baccaf..00fc5f1afb1d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -64,8 +64,7 @@ static int set_tcb_field(struct adapter *adap, struct filter_entry *f, if (!skb) return -ENOMEM; - req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req)); INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(adap->sge.fw_evtq.abs_id) | @@ -266,6 +265,8 @@ static int validate_filter(struct net_device *dev, fs->mask.pfvf_vld) || unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld, fs->mask.ovlan_vld) || + unsupported(fconf, VNIC_ID_F, fs->val.encap_vld, + fs->mask.encap_vld) || unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld)) return -EOPNOTSUPP; @@ -276,8 +277,12 @@ static int validate_filter(struct net_device *dev, * carries that overlap, we need to translate any PF/VF * specification into that internal format below. */ - if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && - is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) + if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && + is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) || + (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) && + is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) || + (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) && + is_field_set(fs->val.encap_vld, fs->mask.encap_vld))) return -EOPNOTSUPP; if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) || (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) && @@ -307,6 +312,9 @@ static int validate_filter(struct net_device *dev, fs->newvlan == VLAN_REWRITE)) return -EOPNOTSUPP; + if (fs->val.encap_vld && + CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) + return -EOPNOTSUPP; return 0; } @@ -706,6 +714,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx) */ void clear_filter(struct adapter *adap, struct filter_entry *f) { + struct port_info *pi = netdev_priv(f->dev); + /* If the new or old filter have loopback rewriteing rules then we'll * need to free any existing L2T, SMT, CLIP entries of filter * rule. 
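The three pairwise rejects added to validate_filter() above amount to "at most one user of the shared VNIC_ID field": PF/VF, outer-VLAN and encapsulation matching all compete for the same compressed-filter slot. A standalone sketch of the equivalent counting form:

#include <stdio.h>

/* equivalent to the three pairwise checks in validate_filter() */
static int vnic_conflict(int pfvf_vld, int ovlan_vld, int encap_vld)
{
	return (pfvf_vld + ovlan_vld + encap_vld) > 1;
}

int main(void)
{
	printf("%d\n", vnic_conflict(1, 0, 0));	/* 0: fine */
	printf("%d\n", vnic_conflict(1, 0, 1));	/* 1: -EOPNOTSUPP */
	return 0;
}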
@@ -716,6 +726,12 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->smt) cxgb4_smt_release(f->smt); + if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) + if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan & + 0x1ff].refcnt)) + t4_free_encap_mac_filt(adap, pi->viid, + f->fs.val.ovlan & 0x1ff, 0); + if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); @@ -841,6 +857,10 @@ bool is_filter_exact_match(struct adapter *adap, if (!is_hashfilter(adap)) return false; + /* Keep tunnel VNI match disabled for hash-filters for now */ + if (fs->mask.encap_vld) + return false; + if (fs->type) { if (is_inaddr_any(fs->val.fip, AF_INET6) || !is_addr_all_mask(fs->mask.fip, AF_INET6)) @@ -934,8 +954,12 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs, ntuple |= (u64)(fs->val.tos) << tp->tos_shift; if (tp->vnic_shift >= 0) { - if ((adap->params.tp.ingress_config & VNIC_F) && - fs->mask.pfvf_vld) + if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) && + fs->mask.encap_vld) + ntuple |= (u64)((fs->val.encap_vld << 16) | + (fs->val.ovlan)) << tp->vnic_shift; + else if ((adap->params.tp.ingress_config & VNIC_F) && + fs->mask.pfvf_vld) ntuple |= (u64)((fs->val.pfvf_vld << 16) | (fs->val.pf << 13) | (fs->val.vf)) << tp->vnic_shift; @@ -1049,6 +1073,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); + struct port_info *pi = netdev_priv(dev); struct tid_info *t = &adapter->tids; struct filter_entry *f; struct sk_buff *skb; @@ -1115,13 +1140,34 @@ static int cxgb4_set_hash_filter(struct net_device *dev, f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; f->fs.val.ovlan_vld = fs->val.pfvf_vld; f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } else if (iconf & USE_ENC_IDX_F) { + if (f->fs.val.encap_vld) { + struct port_info *pi = netdev_priv(f->dev); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + + /* allocate MPS TCAM entry */ + ret = t4_alloc_encap_mac_filt(adapter, pi->viid, + match_all_mac, + match_all_mac, + f->fs.val.vni, + f->fs.mask.vni, + 0, 1, 1); + if (ret < 0) + goto free_atid; + + atomic_inc(&adapter->mps_encap[ret].refcnt); + f->fs.val.ovlan = ret; + f->fs.mask.ovlan = 0xffff; + f->fs.val.ovlan_vld = 1; + f->fs.mask.ovlan_vld = 1; + } } size = sizeof(struct cpl_t6_act_open_req); if (f->fs.type) { ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1); if (ret) - goto free_atid; + goto free_mps; skb = alloc_skb(size, GFP_KERNEL); if (!skb) { @@ -1136,7 +1182,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, skb = alloc_skb(size, GFP_KERNEL); if (!skb) { ret = -ENOMEM; - goto free_atid; + goto free_mps; } mk_act_open_req(f, skb, @@ -1152,6 +1198,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev, free_clip: cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); +free_mps: + if (f->fs.val.encap_vld && f->fs.val.ovlan_vld) + t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1); + free_atid: cxgb4_free_atid(t, atid); @@ -1333,6 +1383,27 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; f->fs.val.ovlan_vld = fs->val.pfvf_vld; f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } else if (iconf & USE_ENC_IDX_F) { + if (f->fs.val.encap_vld) { + struct port_info *pi = netdev_priv(f->dev); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + + /* allocate MPS TCAM entry */ + ret = t4_alloc_encap_mac_filt(adapter, pi->viid, + 
match_all_mac, + match_all_mac, + f->fs.val.vni, + f->fs.mask.vni, + 0, 1, 1); + if (ret < 0) + goto free_clip; + + atomic_inc(&adapter->mps_encap[ret].refcnt); + f->fs.val.ovlan = ret; + f->fs.mask.ovlan = 0x1ff; + f->fs.val.ovlan_vld = 1; + f->fs.mask.ovlan_vld = 1; + } } /* Attempt to set the filter. If we don't succeed, we clear @@ -1349,6 +1420,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, } return ret; + +free_clip: + if (is_t6(adapter->params.chip) && f->fs.type) + cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); + cxgb4_clear_ftid(&adapter->tids, filter_id, + fs->type ? PF_INET6 : PF_INET, chip_ver); + return ret; } static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 005283c7cdfe..0efae2030e71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -301,14 +301,14 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) } } -void t4_os_portmod_changed(const struct adapter *adap, int port_id) +void t4_os_portmod_changed(struct adapter *adap, int port_id) { static const char *mod_str[] = { NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" }; - const struct net_device *dev = adap->port[port_id]; - const struct port_info *pi = netdev_priv(dev); + struct net_device *dev = adap->port[port_id]; + struct port_info *pi = netdev_priv(dev); if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) netdev_info(dev, "port module unplugged\n"); @@ -325,6 +325,11 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) else netdev_info(dev, "%s: unknown module type %d inserted\n", dev->name, pi->mod_type); + + /* If the interface is running, then we'll need any "sticky" Link + * Parameters redone with a new Transceiver Module. 
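A standalone sketch of the "sticky L1 configuration" flag protocol suggested by the comment above (its assignment, pi->link_cfg.redo_l1cfg = netif_running(dev), follows immediately). Only the flag-setting side appears in this section; the consumer that notices the flag and re-issues the L1 configure, presumably via the t4_link_l1cfg()/t4_link_l1cfg_ns() wrappers added to cxgb4.h earlier, is an assumption here:

#include <stdbool.h>
#include <stdio.h>

struct port {
	bool running;		/* netif_running() stand-in */
	bool redo_l1cfg;
};

static void on_module_changed(struct port *p)
{
	/* as in t4_os_portmod_changed(): only redo for a live interface */
	p->redo_l1cfg = p->running;
}

static void link_maintenance(struct port *p)	/* assumed consumer */
{
	if (p->redo_l1cfg) {
		printf("re-issuing L1 configure\n");
		p->redo_l1cfg = false;
	}
}

int main(void)
{
	struct port p = { .running = true };

	on_module_changed(&p);
	link_maintenance(&p);
	return 0;
}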
+ */ + pi->link_cfg.redo_l1cfg = netif_running(dev); } int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ @@ -460,7 +465,7 @@ static int link_start(struct net_device *dev) &pi->link_cfg); if (ret == 0) { local_bh_disable(); - ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true, + ret = t4_enable_pi_params(pi->adapter, mb, pi, true, true, CXGB4_DCB_ENABLED); local_bh_enable(); } @@ -2339,7 +2344,8 @@ static int cxgb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); + ret = t4_enable_pi_params(adapter, adapter->pf, pi, + false, false, false); #ifdef CONFIG_CHELSIO_T4_DCB cxgb4_dcb_reset(dev); dcb_tx_queue_prio_enable(dev, false); @@ -2886,13 +2892,13 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) } /* Convert from Mbps to Kbps */ - req_rate = rate << 10; + req_rate = rate * 1000; /* Max rate is 100 Gbps */ - if (req_rate >= SCHED_MAX_RATE_KBPS) { + if (req_rate > SCHED_MAX_RATE_KBPS) { dev_err(adap->pdev_dev, "Invalid rate %u Mbps, Max rate is %u Mbps\n", - rate, SCHED_MAX_RATE_KBPS >> 10); + rate, SCHED_MAX_RATE_KBPS / 1000); return -ERANGE; } @@ -3081,7 +3087,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev, match_all_mac, match_all_mac, adapter->rawf_start + pi->port_id, - 1, pi->port_id, true); + 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", i); @@ -3169,7 +3175,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, match_all_mac, adapter->rawf_start + pi->port_id, - 1, pi->port_id, true); + 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", be16_to_cpu(ti->port)); @@ -4135,6 +4141,10 @@ static int adap_init0(struct adapter *adap) * card */ card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL); + if (!card_fw) { + ret = -ENOMEM; + goto bye; + } /* Get FW from from /lib/firmware/ */ ret = request_firmware(&fw, fw_info->fw_mod_name, @@ -4276,6 +4286,20 @@ static int adap_init0(struct adapter *adap) adap->tids.nftids = val[4] - val[3] + 1; adap->sge.ingr_start = val[5]; + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + /* Read the raw mps entries. In T6, the last 2 tcam entries + * are reserved for raw mac addresses (rawf = 2, one per port). + */ + params[0] = FW_PARAM_PFVF(RAWF_START); + params[1] = FW_PARAM_PFVF(RAWF_END); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, + params, val); + if (ret == 0) { + adap->rawf_start = val[0]; + adap->rawf_cnt = val[1] - val[0] + 1; + } + } + /* qids (ingress/egress) returned from firmware can be anywhere * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END. 
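The unit fix in cxgb_set_tx_maxrate() above: Mbps to Kbps is a decimal conversion, so the old rate << 10 multiplied by 1024, over-requesting every rate by 2.4% and, together with the >= comparison, rejecting an exact 100 Gbps request. A standalone sketch of the difference:

#include <stdio.h>

int main(void)
{
	unsigned int rate = 100000;		/* 100 Gbps, in Mbps */
	unsigned int old_kbps = rate << 10;	/* 102400000: wrong unit */
	unsigned int new_kbps = rate * 1000;	/* 100000000: correct */

	printf("old %u kbps, new %u kbps\n", old_kbps, new_kbps);
	return 0;
}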
* Hence driver needs to allocate memory for this range to @@ -5181,6 +5205,7 @@ static void free_some_resources(struct adapter *adapter) { unsigned int i; + kvfree(adapter->mps_encap); kvfree(adapter->smt); kvfree(adapter->l2t); kvfree(adapter->srq); @@ -5216,14 +5241,11 @@ static void free_some_resources(struct adapter *adapter) NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) #define SEGMENT_SIZE 128 -static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) +static int t4_get_chip_type(struct adapter *adap, int ver) { - u16 device_id; + u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A)); - /* Retrieve adapter's device ID */ - pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); - - switch (device_id >> 12) { + switch (ver) { case CHELSIO_T4: return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev); case CHELSIO_T5: @@ -5231,8 +5253,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) case CHELSIO_T6: return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev); default: - dev_err(&pdev->dev, "Device %d is not supported\n", - device_id); + break; } return -EINVAL; } @@ -5261,13 +5282,9 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) u32 pcie_fw; pcie_fw = readl(adap->regs + PCIE_FW_A); - /* Check if cxgb4 is the MASTER and fw is initialized */ - if (num_vfs && - (!(pcie_fw & PCIE_FW_INIT_F) || - !(pcie_fw & PCIE_FW_MASTER_VLD_F) || - PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) { - dev_warn(&pdev->dev, - "cxgb4 driver needs to be MASTER to support SRIOV\n"); + /* Check if fw is initialized */ + if (!(pcie_fw & PCIE_FW_INIT_F)) { + dev_warn(&pdev->dev, "Device not initialized\n"); return -EOPNOTSUPP; } @@ -5406,15 +5423,18 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - int func, i, err, s_qpp, qpp, num_seg; + struct net_device *netdev; + struct adapter *adapter; + static int adap_idx = 1; + int s_qpp, qpp, num_seg; struct port_info *pi; bool highdma = false; - struct adapter *adapter = NULL; - struct net_device *netdev; - void __iomem *regs; - u32 whoami, pl_rev; enum chip_type chip; - static int adap_idx = 1; + void __iomem *regs; + int func, chip_ver; + u16 device_id; + int i, err; + u32 whoami; printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); @@ -5450,11 +5470,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; /* We control everything through one PF */ - whoami = readl(regs + PL_WHOAMI_A); - pl_rev = REV_G(readl(regs + PL_REV_A)); - chip = get_chip_type(pdev, pl_rev); - func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ? - SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); + whoami = t4_read_reg(adapter, PL_WHOAMI_A); + pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); + chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id)); + if (chip < 0) { + dev_err(&pdev->dev, "Device %d is not supported\n", device_id); + err = chip; + goto out_free_adapter; + } + chip_ver = CHELSIO_CHIP_VERSION(chip); + func = chip_ver <= CHELSIO_T5 ? + SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; @@ -5543,6 +5569,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_free_adapter; + if (is_kdump_kernel()) { + /* Collect hardware state and append to /proc/vmcore */ + err = cxgb4_cudbg_vmcore_add_dump(adapter); + if (err) { + dev_warn(adapter->pdev_dev, + "Fail collecting vmcore device dump, err: %d. 
Continuing\n", + err); + err = 0; + } + } if (!is_t4(adapter->params.chip)) { s_qpp = (QUEUESPERPAGEPF0_S + @@ -5610,8 +5646,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; - if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) + if (chip_ver > CHELSIO_T5) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO | NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + } if (highdma) netdev->hw_features |= NETIF_F_HIGHDMA; @@ -5676,8 +5719,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->params.offload = 0; } + adapter->mps_encap = kvzalloc(sizeof(struct mps_encap_entry) * + adapter->params.arch.mps_tcam_size, + GFP_KERNEL); + if (!adapter->mps_encap) + dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n"); + #if IS_ENABLED(CONFIG_IPV6) - if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) && + if (chip_ver <= CHELSIO_T5 && (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) { /* CLIP functionality is not present in hardware, * hence disable all offload features diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 36563364bae7..3ddd2c4acf68 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -194,6 +194,23 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.tos = mask->tos; } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key, *mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + cls->mask); + fs->val.vni = be32_to_cpu(key->keyid); + fs->mask.vni = be32_to_cpu(mask->keyid); + if (fs->mask.vni) { + fs->val.encap_vld = 1; + fs->mask.encap_vld = 1; + } + } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_dissector_key_vlan *key, *mask; u16 vlan_tci, vlan_tci_mask; @@ -247,6 +264,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 1817a0307d26..77c2c538b1fd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -491,7 +491,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev, if (tp->protocol_shift >= 0) ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; - if (tp->vnic_shift >= 0) { + if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) { u32 viid = cxgb4_port_viid(dev); u32 vf = FW_VIID_VIN_G(viid); u32 pf = FW_VIID_PFN_G(viid); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1a28df137e1f..7a271feec5e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -694,7 +694,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, { size_t len = nelem * elem_size + stat_size; void *s = NULL; - void *p = 
dma_alloc_coherent(dev, len, phys, GFP_KERNEL); + void *p = dma_zalloc_coherent(dev, len, phys, GFP_KERNEL); if (!p) return NULL; @@ -708,7 +708,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, } if (metadata) *(void **)metadata = s; - memset(p, 0, len); return p; } @@ -1072,12 +1071,27 @@ static void *inline_tx_skb_header(const struct sk_buff *skb, static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb) { int csum_type; - const struct iphdr *iph = ip_hdr(skb); + bool inner_hdr_csum = false; + u16 proto, ver; - if (iph->version == 4) { - if (iph->protocol == IPPROTO_TCP) + if (skb->encapsulation && + (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)) + inner_hdr_csum = true; + + if (inner_hdr_csum) { + ver = inner_ip_hdr(skb)->version; + proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : + inner_ipv6_hdr(skb)->nexthdr; + } else { + ver = ip_hdr(skb)->version; + proto = (ver == 4) ? ip_hdr(skb)->protocol : + ipv6_hdr(skb)->nexthdr; + } + + if (ver == 4) { + if (proto == IPPROTO_TCP) csum_type = TX_CSUM_TCPIP; - else if (iph->protocol == IPPROTO_UDP) + else if (proto == IPPROTO_UDP) csum_type = TX_CSUM_UDPIP; else { nocsum: /* @@ -1090,19 +1104,29 @@ nocsum: /* /* * this doesn't work with extension headers */ - const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph; - - if (ip6h->nexthdr == IPPROTO_TCP) + if (proto == IPPROTO_TCP) csum_type = TX_CSUM_TCPIP6; - else if (ip6h->nexthdr == IPPROTO_UDP) + else if (proto == IPPROTO_UDP) csum_type = TX_CSUM_UDPIP6; else goto nocsum; } if (likely(csum_type >= TX_CSUM_TCPIP)) { - u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)); - int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + int eth_hdr_len, l4_len; + u64 hdr_len; + + if (inner_hdr_csum) { + /* This allows checksum offload for all encapsulated + * packets like GRE etc.. + */ + l4_len = skb_inner_network_header_len(skb); + eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; + } else { + l4_len = skb_network_header_len(skb); + eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; + } + hdr_len = TXPKT_IPHDR_LEN_V(l4_len); if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len); @@ -1273,7 +1297,7 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid, ctrl0, op; - u64 cntrl, *end; + u64 cntrl, *end, *sgl; int qidx, credits; unsigned int flits, ndesc; struct adapter *adap; @@ -1386,8 +1410,9 @@ out_free: dev_kfree_skb_any(skb); end = (u64 *)wr + flits; len = immediate ? 
skb->len : 0; + len += sizeof(*cpl); if (ssi->gso_size) { - struct cpl_tx_pkt_lso *lso = (void *)wr; + struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; @@ -1417,20 +1442,19 @@ out_free: dev_kfree_skb_any(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) cntrl = hwcsum(adap->params.chip, skb); } else { - lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | - LSO_IPV6_V(v6) | - LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | - LSO_IPHDR_LEN_V(l3hdr_len / 4) | - LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); - lso->c.ipid_ofst = htons(0); - lso->c.mss = htons(ssi->gso_size); - lso->c.seqno_offset = htonl(0); + lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->ipid_ofst = htons(0); + lso->mss = htons(ssi->gso_size); + lso->seqno_offset = htonl(0); if (is_t4(adap->params.chip)) - lso->c.len = htonl(skb->len); + lso->len = htonl(skb->len); else - lso->c.len = - htonl(LSO_T5_XFER_SIZE_V(skb->len)); + lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); cpl = (void *)(lso + 1); if (CHELSIO_CHIP_VERSION(adap->params.chip) @@ -1443,10 +1467,22 @@ out_free: dev_kfree_skb_any(skb); TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | TXPKT_IPHDR_LEN_V(l3hdr_len); } + sgl = (u64 *)(cpl + 1); /* sgl start here */ + if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { + /* If current position is already at the end of the + * txq, reset the current to point to start of the queue + * and update the end ptr as well. + */ + if (sgl == (u64 *)q->q.stat) { + int left = (u8 *)end - (u8 *)q->q.stat; + + end = (void *)q->q.desc + left; + sgl = (void *)q->q.desc; + } + } q->tso++; q->tx_cso += ssi->gso_segs; } else { - len += sizeof(*cpl); if (ptp_enabled) op = FW_PTP_TX_PKT_WR; else @@ -1454,6 +1490,7 @@ out_free: dev_kfree_skb_any(skb); wr->op_immdlen = htonl(FW_WR_OP_V(op) | FW_WR_IMMDLEN_V(len)); cpl = (void *)(wr + 1); + sgl = (u64 *)(cpl + 1); if (skb->ip_summed == CHECKSUM_PARTIAL) { cntrl = hwcsum(adap->params.chip, skb) | TXPKT_IPCSUM_DIS_F; @@ -1487,20 +1524,19 @@ out_free: dev_kfree_skb_any(skb); cpl->ctrl1 = cpu_to_be64(cntrl); if (immediate) { - cxgb4_inline_tx_skb(skb, &q->q, cpl + 1); + cxgb4_inline_tx_skb(skb, &q->q, sgl); dev_consume_skb_any(skb); } else { int last_desc; - cxgb4_write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), - end, 0, addr); + cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr); skb_orphan(skb); last_desc = q->q.pidx + ndesc - 1; if (last_desc >= q->q.size) last_desc -= q->q.size; q->q.sdesc[last_desc].skb = skb; - q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl; } txq_advance(&q->q, ndesc); @@ -2259,7 +2295,7 @@ static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, } static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, - const struct cpl_rx_pkt *pkt) + const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len) { struct adapter *adapter = rxq->rspq.adap; struct sge *s = &adapter->sge; @@ -2275,6 +2311,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, } copy_frags(skb, gl, s->pktshift); + if (tnl_hdr_len) + skb->csum_level = 1; skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; @@ -2406,7 +2444,7 @@ int 
t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sge *s = &q->adap->sge; int cpl_trace_pkt = is_t4(q->adap->params.chip) ? CPL_TRACE_PKT : CPL_TRACE_PKT_T5; - u16 err_vec; + u16 err_vec, tnl_hdr_len = 0; struct port_info *pi; int ret = 0; @@ -2415,16 +2453,19 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, pkt = (const struct cpl_rx_pkt *)rsp; /* Compressed error vector is enabled for T6 only */ - if (q->adap->params.tp.rx_pkt_encap) + if (q->adap->params.tp.rx_pkt_encap) { err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); - else + tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); + } else { err_vec = be16_to_cpu(pkt->err_vec); + } csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); - if ((pkt->l2info & htonl(RXF_TCP_F)) && + if (((pkt->l2info & htonl(RXF_TCP_F)) || + tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { - do_gro(rxq, si, pkt); + do_gro(rxq, si, pkt, tnl_hdr_len); return 0; } @@ -2471,7 +2512,13 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, } else if (pkt->l2info & htonl(RXF_IP_F)) { __sum16 c = (__force __sum16)pkt->csum; skb->csum = csum_unfold(c); - skb->ip_summed = CHECKSUM_COMPLETE; + + if (tnl_hdr_len) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + } else { + skb->ip_summed = CHECKSUM_COMPLETE; + } rxq->stats.rx_cso++; } } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c index 6228a5708307..82b70a565e24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/srq.c +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c @@ -84,8 +84,7 @@ int cxgb4_get_srq_entry(struct net_device *dev, if (!skb) return -ENOMEM; req = (struct cpl_srq_table_req *) - __skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); + __skb_put_zero(skb, sizeof(*req)); INIT_TP_WR(req, 0); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SRQ_TABLE_REQ, TID_TID_V(srq_idx) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h index 54b718111e3f..721c77577ec5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_chip_type.h @@ -34,6 +34,8 @@ #ifndef __T4_CHIP_TYPE_H__ #define __T4_CHIP_TYPE_H__ +#define CHELSIO_PCI_ID_VER(__DeviceID) ((__DeviceID) >> 12) + #define CHELSIO_T4 0x4 #define CHELSIO_T5 0x5 #define CHELSIO_T6 0x6 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7cb3ef466cc7..974a868a4824 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3941,8 +3941,9 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(FORCE_PAUSE); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); @@ -3982,8 +3983,9 @@ static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) CAP32_TO_CAP16(802_3_PAUSE); CAP32_TO_CAP16(802_3_ASM_DIR); CAP32_TO_CAP16(ANEG); - CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(FORCE_PAUSE); CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(MDISTRAIGHT); CAP32_TO_CAP16(FEC_RS); CAP32_TO_CAP16(FEC_BASER_RS); @@ -4014,6 +4016,8 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) fw_pause |= FW_PORT_CAP32_FC_RX; if (cc_pause & PAUSE_TX) fw_pause |= FW_PORT_CAP32_FC_TX; + if 
(!(cc_pause & PAUSE_AUTONEG)) + fw_pause |= FW_PORT_CAP32_FORCE_PAUSE; return fw_pause; } @@ -4058,14 +4062,17 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. */ -int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, - unsigned int port, struct link_config *lc) +int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc, + bool sleep_ok, int timeout) { unsigned int fw_caps = adapter->params.fw_caps_support; - struct fw_port_cmd cmd; - unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; + struct fw_port_cmd cmd; + unsigned int fw_mdi; + int ret; + fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps); /* Convert driver coding of Pause Frame Flow Control settings into the * Firmware's API. */ @@ -4087,7 +4094,7 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, /* Figure out what our Requested Port Capabilities are going to be. */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { - rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + rcap = lc->acaps | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { @@ -4098,6 +4105,17 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } + /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so + * we need to exclude this from this check in order to maintain + * compatibility ... + */ + if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) { + dev_err(adapter->pdev_dev, + "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n", + rcap, lc->pcaps); + return -EINVAL; + } + /* And send that on to the Firmware ... */ memset(&cmd, 0, sizeof(cmd)); @@ -4108,12 +4126,21 @@ int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 ? 
FW_PORT_ACTION_L1_CFG : FW_PORT_ACTION_L1_CFG32) | - FW_LEN16(cmd)); + FW_LEN16(cmd)); if (fw_caps == FW_CAPS16) cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); else cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); - return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); + + ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, + sleep_ok, timeout); + if (ret) { + dev_err(adapter->pdev_dev, + "Requested Port Capabilities %#x rejected, error %d\n", + rcap, -ret); + return ret; + } + return ret; } /** @@ -7513,6 +7540,43 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, } /** + * t4_free_encap_mac_filt - frees MPS entry at given index + * @adap: the adapter + * @viid: the VI id + * @idx: index of MPS entry to be freed + * @sleep_ok: call is allowed to sleep + * + * Frees the MPS entry at supplied index + * + * Returns a negative error number or zero on success + */ +int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, + int idx, bool sleep_ok) +{ + struct fw_vi_mac_exact *p; + u8 addr[] = {0, 0, 0, 0, 0, 0}; + struct fw_vi_mac_cmd c; + int ret = 0; + u32 exact; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC); + c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + exact | + FW_CMD_LEN16_V(1)); + p = c.u.exact; + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + return ret; +} + +/** * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam * @adap: the adapter * @viid: the VI id @@ -7563,6 +7627,55 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, } /** + * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support + * @adap: the adapter + * @viid: the VI id + * @mac: the MAC address + * @mask: the mask + * @vni: the VNI id for the tunnel protocol + * @vni_mask: mask for the VNI id + * @dip_hit: to enable DIP match for the MPS entry + * @lookup_type: MAC address for inner (1) or outer (0) header + * @sleep_ok: call is allowed to sleep + * + * Allocates an MPS entry with specified MAC address and VNI value. + * + * Returns a negative error number or the allocated index for this mac. 
+ */ +int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int vni, + unsigned int vni_mask, u8 dip_hit, u8 lookup_type, + bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + struct fw_vi_mac_vni *p = c.u.exact_vni; + int ret = 0; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI); + c.freemacs_to_len16 = cpu_to_be32(val); + p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask)); + + p->lookup_type_to_vni = + cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) | + FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) | + FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type)); + p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask)); + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + if (ret == 0) + ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); + return ret; +} + +/** * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam * @adap: the adapter * @viid: the VI id @@ -7909,6 +8022,34 @@ int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, } /** + * t4_enable_pi_params - enable/disable a Port's Virtual Interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pi: the Port Information structure + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a Port's Virtual Interface. Note that setting DCB + * Enable only makes sense when enabling a Virtual Interface ... + * If the Virtual Interface enable/disable operation is successful, + * we notify the OS-specific code of a potential Link Status change + * via the OS Contract API t4_os_link_changed(). 
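
A minimal usage sketch for the t4_alloc_encap_mac_filt()/t4_free_encap_mac_filt() pair added above. The adapter, VI id and all field values here are hypothetical, chosen only to show the calling convention from the two kernel-doc blocks:

	u8 mac[ETH_ALEN]  = { 0x00, 0x07, 0x43, 0x11, 0x22, 0x33 };
	u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int idx;

	/* Match the inner MAC (lookup_type = 1) for VXLAN VNI 100 */
	idx = t4_alloc_encap_mac_filt(adap, viid, mac, mask,
				      100, 0xffffff, 0, 1, true);
	if (idx < 0)
		return idx;		/* negative errno from the firmware */

	/* ... later, release the MPS TCAM entry again ... */
	t4_free_encap_mac_filt(adap, viid, idx, true);
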
+ */ +int t4_enable_pi_params(struct adapter *adap, unsigned int mbox, + struct port_info *pi, + bool rx_en, bool tx_en, bool dcb_en) +{ + int ret = t4_enable_vi_params(adap, mbox, pi->viid, + rx_en, tx_en, dcb_en); + if (ret) + return ret; + t4_os_link_changed(adap, pi->port_id, + rx_en && tx_en && pi->link_cfg.link_ok); + return 0; +} + +/** * t4_identify_port - identify a VI's port by blinking its LED * @adap: the adapter * @mbox: mailbox to use for the FW command @@ -8249,6 +8390,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); + lc->new_module = false; + lc->redo_l1cfg = false; + if (mod_type != pi->mod_type) { /* With the newer SFP28 and QSFP28 Transceiver Module Types, * various fundamental Port Capabilities which used to be @@ -8283,6 +8427,8 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) pi->port_type = port_type; pi->mod_type = mod_type; + + lc->new_module = t4_is_inserted_mod_type(mod_type); t4_os_portmod_changed(adapter, pi->port_id); } @@ -8301,7 +8447,9 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->lpacaps = lpacaps; lc->acaps = acaps & ADVERT_MASK; - if (lc->acaps & FW_PORT_CAP32_ANEG) { + if (!(lc->acaps & FW_PORT_CAP32_ANEG)) { + lc->autoneg = AUTONEG_DISABLE; + } else if (lc->acaps & FW_PORT_CAP32_ANEG) { lc->autoneg = AUTONEG_ENABLE; } else { /* When Autoneg is disabled, user needs to set @@ -8315,6 +8463,26 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) t4_os_link_changed(adapter, pi->port_id, link_ok); } + + if (lc->new_module && lc->redo_l1cfg) { + struct link_config old_lc; + int ret; + + /* Save the current L1 Configuration and restore it if an + * error occurs. We probably should fix the l1_cfg*() + * routines not to change the link_config when an error + * occurs ... + */ + old_lc = *lc; + ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc); + if (ret) { + *lc = old_lc; + dev_warn(adapter->pdev_dev, + "Attempt to update new Transceiver Module settings failed\n"); + } + } + lc->new_module = false; + lc->redo_l1cfg = false; } /** @@ -8486,6 +8654,13 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, lc->requested_fec = FEC_AUTO; lc->fec = fwcap_to_cc_fec(lc->def_acaps); + /* If the Port is capable of Auto-Negtotiation, initialize it as + * "enabled" and copy over all of the Physical Port Capabilities + * to the Advertised Port Capabilities. Otherwise mark it as + * Auto-Negotiate disabled and select the highest supported speed + * for the link. Note parallel structure in t4_link_l1cfg_core() + * and t4_handle_get_port_info(). 
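
Stepping back to the t4_link_l1cfg_core() hunk above: the new Requested-vs-Physical capability check deliberately masks out FW_PORT_CAP32_FORCE_PAUSE, because older firmware never advertises that bit in pcaps. A worked sketch with illustrative capability words:

	/* Old firmware: pcaps lacks FORCE_PAUSE ... */
	fw_port_cap32_t pcaps = FW_PORT_CAP32_ANEG |
				FW_PORT_CAP32_FC_RX | FW_PORT_CAP32_FC_TX;
	/* ... but the driver may still request it when PAUSE_AUTONEG is off */
	fw_port_cap32_t rcap = pcaps | FW_PORT_CAP32_FORCE_PAUSE;

	/* Only the excluded bit exceeds pcaps, so this does not trigger */
	if ((rcap & ~pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE)
		return -EINVAL;
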
+ */ if (lc->pcaps & FW_PORT_CAP32_ANEG) { lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; @@ -8493,6 +8668,7 @@ static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, } else { lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; + lc->speed_caps = fwcap_to_fwspeed(acaps); } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index fe2029e993a2..09e38f0733bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1233,6 +1233,11 @@ struct cpl_rx_pkt { #define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S) #define T6_COMPR_RXERR_SUM_F T6_COMPR_RXERR_SUM_V(1U) +#define T6_RX_TNLHDR_LEN_S 8 +#define T6_RX_TNLHDR_LEN_M 0xFF +#define T6_RX_TNLHDR_LEN_V(x) ((x) << T6_RX_TNLHDR_LEN_S) +#define T6_RX_TNLHDR_LEN_G(x) (((x) >> T6_RX_TNLHDR_LEN_S) & T6_RX_TNLHDR_LEN_M) + struct cpl_trace_pkt { u8 opcode; u8 intf; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 51b18035d691..c7f8d0441278 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -145,6 +145,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */ + CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ @@ -184,6 +187,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */ /* T6 adapters: */ @@ -208,6 +212,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 276fdf214b75..6b55aa2eb2a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1598,6 +1598,10 @@ #define VNIC_V(x) ((x) << VNIC_S) #define VNIC_F VNIC_V(1U) +#define USE_ENC_IDX_S 13 +#define USE_ENC_IDX_V(x) ((x) << USE_ENC_IDX_S) +#define USE_ENC_IDX_F USE_ENC_IDX_V(1U) + #define CSUM_HAS_PSEUDO_HDR_S 10 #define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S) #define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U) @@ -2995,6 +2999,7 @@ #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c +#define LE_DB_CLCAM_TID_BASE_A 0x19df4 #define LE_DB_TID_HASHBASE_A 0x19df8 #define T6_LE_DB_HASH_TID_BASE_A 0x19df8 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index e3d4751f21ac..f1967cf6d43c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ 
-1305,6 +1305,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, FW_PARAMS_PARAM_PFVF_TLS_START = 0x34, FW_PARAMS_PARAM_PFVF_TLS_END = 0x35, + FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36, + FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; @@ -2156,6 +2158,14 @@ struct fw_vi_mac_cmd { __be64 data0m_pkd; __be32 data1m[2]; } raw; + struct fw_vi_mac_vni { + __be16 valid_to_idx; + __u8 macaddr[6]; + __be16 r7; + __u8 macaddr_mask[6]; + __be32 lookup_type_to_vni; + __be32 vni_mask_pkd; + } exact_vni[2]; } u; }; @@ -2203,6 +2213,32 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_RAW_IDX_G(x) \ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_S 31 +#define FW_VI_MAC_CMD_LOOKUP_TYPE_M 0x1 +#define FW_VI_MAC_CMD_LOOKUP_TYPE_V(x) ((x) << FW_VI_MAC_CMD_LOOKUP_TYPE_S) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_G(x) \ + (((x) >> FW_VI_MAC_CMD_LOOKUP_TYPE_S) & FW_VI_MAC_CMD_LOOKUP_TYPE_M) +#define FW_VI_MAC_CMD_LOOKUP_TYPE_F FW_VI_MAC_CMD_LOOKUP_TYPE_V(1U) + +#define FW_VI_MAC_CMD_DIP_HIT_S 30 +#define FW_VI_MAC_CMD_DIP_HIT_M 0x1 +#define FW_VI_MAC_CMD_DIP_HIT_V(x) ((x) << FW_VI_MAC_CMD_DIP_HIT_S) +#define FW_VI_MAC_CMD_DIP_HIT_G(x) \ + (((x) >> FW_VI_MAC_CMD_DIP_HIT_S) & FW_VI_MAC_CMD_DIP_HIT_M) +#define FW_VI_MAC_CMD_DIP_HIT_F FW_VI_MAC_CMD_DIP_HIT_V(1U) + +#define FW_VI_MAC_CMD_VNI_S 0 +#define FW_VI_MAC_CMD_VNI_M 0xffffff +#define FW_VI_MAC_CMD_VNI_V(x) ((x) << FW_VI_MAC_CMD_VNI_S) +#define FW_VI_MAC_CMD_VNI_G(x) \ + (((x) >> FW_VI_MAC_CMD_VNI_S) & FW_VI_MAC_CMD_VNI_M) + +#define FW_VI_MAC_CMD_VNI_MASK_S 0 +#define FW_VI_MAC_CMD_VNI_MASK_M 0xffffff +#define FW_VI_MAC_CMD_VNI_MASK_V(x) ((x) << FW_VI_MAC_CMD_VNI_MASK_S) +#define FW_VI_MAC_CMD_VNI_MASK_G(x) \ + (((x) >> FW_VI_MAC_CMD_VNI_MASK_S) & FW_VI_MAC_CMD_VNI_MASK_M) + #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { @@ -2435,11 +2471,11 @@ enum fw_port_cap { FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, - FW_PORT_CAP_MDIX = 0x0200, - FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_MDIAUTO = 0x0200, + FW_PORT_CAP_MDISTRAIGHT = 0x0400, FW_PORT_CAP_FEC_RS = 0x0800, FW_PORT_CAP_FEC_BASER_RS = 0x1000, - FW_PORT_CAP_FEC_RESERVED = 0x2000, + FW_PORT_CAP_FORCE_PAUSE = 0x2000, FW_PORT_CAP_802_3_PAUSE = 0x4000, FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; @@ -2479,14 +2515,15 @@ enum fw_port_mdi { #define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL #define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL #define FW_PORT_CAP32_ANEG 0x00100000UL -#define FW_PORT_CAP32_MDIX 0x00200000UL -#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_MDIAUTO 0x00200000UL +#define FW_PORT_CAP32_MDISTRAIGHT 0x00400000UL #define FW_PORT_CAP32_FEC_RS 0x00800000UL #define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL #define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL #define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL #define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL -#define FW_PORT_CAP32_RESERVED2 0xf0000000UL +#define FW_PORT_CAP32_FORCE_PAUSE 0x10000000UL +#define FW_PORT_CAP32_RESERVED2 0xe0000000UL #define FW_PORT_CAP32_SPEED_S 0 #define FW_PORT_CAP32_SPEED_M 0xfff diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 123e2c1b65f5..4eb15ceddca3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 
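
The FW_VI_MAC_CMD_LOOKUP_TYPE/DIP_HIT/VNI macros in the t4fw_api.h hunk above follow the driver-wide _S/_M/_V/_G shift-and-mask convention, and t4_alloc_encap_mac_filt() uses the _V forms to compose lookup_type_to_vni. A round-trip sketch (the VNI value is illustrative):

	u32 word = FW_VI_MAC_CMD_VNI_V(100) |		/* 24-bit VNI, bit 0 */
		   FW_VI_MAC_CMD_DIP_HIT_V(1) |		/* bit 30 */
		   FW_VI_MAC_CMD_LOOKUP_TYPE_V(1);	/* bit 31: inner MAC */

	/* _G undoes _V: (word >> 0) & 0xffffff == 100 */
	u32 vni = FW_VI_MAC_CMD_VNI_G(word);
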
-#define T4FW_VERSION_MINOR 0x10 -#define T4FW_VERSION_MICRO 0x3F +#define T4FW_VERSION_MINOR 0x13 +#define T4FW_VERSION_MICRO 0x01 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x10 -#define T5FW_VERSION_MICRO 0x3F +#define T5FW_VERSION_MINOR 0x13 +#define T5FW_VERSION_MICRO 0x01 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x10 -#define T6FW_VERSION_MICRO 0x3F +#define T6FW_VERSION_MINOR 0x13 +#define T6FW_VERSION_MICRO 0x01 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 9a81b52307a9..ff84791a0ff8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -274,7 +274,7 @@ static int link_start(struct net_device *dev) * is enabled on a port. */ if (ret == 0) - ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true); + ret = t4vf_enable_pi(pi->adapter, pi, true, true); /* The Virtual Interfaces are connected to an internal switch on the * chip which allows VIs attached to the same port to talk to each @@ -822,8 +822,7 @@ static int cxgb4vf_stop(struct net_device *dev) netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - t4vf_enable_vi(adapter, pi->viid, false, false); - pi->link_cfg.link_ok = 0; + t4vf_enable_pi(adapter, pi, false, false); clear_bit(pi->port_id, &adapter->open_device_map); if (adapter->open_device_map == 0) @@ -1419,6 +1418,22 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, base->duplex = DUPLEX_UNKNOWN; } + if (pi->link_cfg.fc & PAUSE_RX) { + if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Pause); + } else { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + } else if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + base->autoneg = pi->link_cfg.autoneg; if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index dfce5df7538e..3007e1ac1e61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -756,7 +756,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, * Allocate the hardware ring and PCI DMA bus address space for said. */ size_t hwlen = nelem * hwsize + stat_size; - void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); + void *hwring = dma_zalloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL); if (!hwring) return NULL; @@ -776,11 +776,6 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize, *(void **)swringp = swring; } - /* - * Zero out the hardware ring and return its address as our function - * value. 
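
This cxgb4vf alloc_ring() hunk mirrors the cxgb4 sge.c one earlier: the open-coded zeroing is folded into the allocator. In isolation (p, len, phys and dev as in the surrounding functions), the transformation is:

	/* before: allocate, then clear by hand */
	p = dma_alloc_coherent(dev, len, &phys, GFP_KERNEL);
	if (p)
		memset(p, 0, len);

	/* after: one call, returns zeroed DMA-coherent memory */
	p = dma_zalloc_coherent(dev, len, &phys, GFP_KERNEL);
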
- */ - memset(hwring, 0, hwlen); return hwring; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 712e8f0c71b4..ccca67cf4487 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -391,7 +391,10 @@ int t4vf_config_rss_range(struct adapter *, unsigned int, int, int, int t4vf_alloc_vi(struct adapter *, int); int t4vf_free_vi(struct adapter *, int); -int t4vf_enable_vi(struct adapter *, unsigned int, bool, bool); +int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, bool rx_en, + bool tx_en); +int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi, bool rx_en, + bool tx_en); int t4vf_identify_port(struct adapter *, unsigned int, unsigned int); int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 798695bf8678..5b8c08cf523f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -341,8 +341,8 @@ static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); @@ -405,6 +405,36 @@ static unsigned int fwcap_to_speed(fw_port_cap32_t caps) return 0; } +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. It will be either the highest speed from the list of + * speeds or whatever user has set using ethtool. + */ +static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + return 0; +} + /* * init_link_config - initialize a link's SW state * @lc: structure holding the link state @@ -431,6 +461,13 @@ static void init_link_config(struct link_config *lc, lc->requested_fec = FEC_AUTO; lc->fec = lc->auto_fec; + /* If the Port is capable of Auto-Negtotiation, initialize it as + * "enabled" and copy over all of the Physical Port Capabilities + * to the Advertised Port Capabilities. Otherwise mark it as + * Auto-Negotiate disabled and select the highest supported speed + * for the link. Note parallel structure in t4_link_l1cfg_core() + * and t4_handle_get_port_info(). 
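
fwcap_to_fwspeed() above reduces an advertised-capabilities word to its single fastest speed bit by testing 400G down to 100M and returning on the first hit. For example (capability names from t4fw_api.h, values illustrative):

	fw_port_cap32_t acaps = FW_PORT_CAP32_SPEED_25G |
				FW_PORT_CAP32_SPEED_10G |
				FW_PORT_CAP32_SPEED_1G;

	/* 400G..50G miss; 25G is the first set bit */
	fw_port_cap32_t speed = fwcap_to_fwspeed(acaps);
	/* speed == FW_PORT_CAP32_SPEED_25G */
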
+ */ if (lc->pcaps & FW_PORT_CAP32_ANEG) { lc->acaps = acaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; @@ -438,6 +475,7 @@ static void init_link_config(struct link_config *lc, } else { lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; + lc->speed_caps = fwcap_to_fwspeed(acaps); } } @@ -1362,6 +1400,30 @@ int t4vf_enable_vi(struct adapter *adapter, unsigned int viid, } /** + * t4vf_enable_pi - enable/disable a Port's virtual interface + * @adapter: the adapter + * @pi: the Port Information structure + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a Port's virtual interface. If the Virtual + * Interface enable/disable operation is successful, we notify the + * OS-specific code of a potential Link Status change via the OS Contract + * API t4vf_os_link_changed(). + */ +int t4vf_enable_pi(struct adapter *adapter, struct port_info *pi, + bool rx_en, bool tx_en) +{ + int ret = t4vf_enable_vi(adapter, pi->viid, rx_en, tx_en); + + if (ret) + return ret; + t4vf_os_link_changed(adapter, pi->pidx, + rx_en && tx_en && pi->link_cfg.link_ok); + return 0; +} + +/** * t4vf_identify_port - identify a VI's port by blinking its LED * @adapter: the adapter * @viid: the Virtual Interface ID @@ -1955,7 +2017,14 @@ static void t4vf_handle_get_port_info(struct port_info *pi, lc->lpacaps = lpacaps; lc->acaps = acaps & ADVERT_MASK; - if (lc->acaps & FW_PORT_CAP32_ANEG) { + /* If we're not physically capable of Auto-Negotiation, note + * this as Auto-Negotiation disabled. Otherwise, we track + * what Auto-Negotiation settings we have. Note parallel + * structure in init_link_config(). + */ + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + lc->autoneg = AUTONEG_DISABLE; + } else if (lc->acaps & FW_PORT_CAP32_ANEG) { lc->autoneg = AUTONEG_ENABLE; } else { /* When Autoneg is disabled, user needs to set diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h index 4b5aacc09cab..240ba9d4c399 100644 --- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.h @@ -90,8 +90,7 @@ cxgb_mk_tid_release(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_tid_release *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); @@ -104,8 +103,7 @@ cxgb_mk_close_con_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_close_con_req *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid)); @@ -119,8 +117,7 @@ cxgb_mk_abort_req(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_abort_req *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); @@ -134,8 +131,7 @@ cxgb_mk_abort_rpl(struct sk_buff *skb, u32 len, u32 tid, u16 chan) { struct cpl_abort_rpl *rpl; - rpl = __skb_put(skb, len); - memset(rpl, 0, len); + rpl = __skb_put_zero(skb, len); INIT_TP_WR(rpl, tid); OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); @@ -149,8 +145,7 @@ cxgb_mk_rx_data_ack(struct sk_buff *skb, u32 len, u32 tid, u16 chan, { struct cpl_rx_data_ack *req; - req = __skb_put(skb, len); - memset(req, 0, len); + req = __skb_put_zero(skb, len); INIT_TP_WR(req, tid); OPCODE_TID(req) = 
cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, tid)); diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 869006c2002d..f42f7a6e1559 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -476,18 +476,28 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd) static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd) { + u8 rss_hash_type = 0; cmd->data = 0; + spin_lock_bh(&enic->devcmd_lock); + (void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); + spin_unlock_bh(&enic->devcmd_lock); switch (cmd->flow_type) { case TCP_V6_FLOW: case TCP_V4_FLOW: - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST; + break; case UDP_V6_FLOW: + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; case UDP_V4_FLOW: - if (vnic_dev_capable_udp_rss(enic->vdev)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + break; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 8a8b12b720ef..30d2eaa18c04 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2320,16 +2320,24 @@ static int enic_set_rss_nic_cfg(struct enic *enic) { struct device *dev = enic_get_dev(enic); const u8 rss_default_cpu = 0; - u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | - NIC_CFG_RSS_HASH_TYPE_IPV6 | - NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; const u8 rss_hash_bits = 7; const u8 rss_base_cpu = 0; + u8 rss_hash_type; + int res; u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); - if (vnic_dev_capable_udp_rss(enic->vdev)) - rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP; + spin_lock_bh(&enic->devcmd_lock); + res = vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type); + spin_unlock_bh(&enic->devcmd_lock); + if (res) { + /* defaults for old adapters + */ + rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 | + NIC_CFG_RSS_HASH_TYPE_IPV6 | + NIC_CFG_RSS_HASH_TYPE_TCP_IPV6; + } + if (rss_enable) { if (!enic_set_rsskey(enic)) { if (enic_set_rsscpu(enic, rss_hash_bits)) { diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c index 9c96911fb2c8..40b20817ddd5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.c +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -149,6 +149,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en) { + enum vnic_devcmd_cmd cmd = CMD_NIC_CFG; u64 a0, a1; u32 nic_cfg; int wait = 1000; @@ -160,7 +161,11 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, a0 = nic_cfg; a1 = 0; - return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait); + if (rss_hash_type & (NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 | + NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)) + cmd = CMD_NIC_CFG_CHK; + + return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait); } int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len) diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c 
b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 76cdd4c9d11f..e9db811df59c 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -1282,19 +1282,23 @@ int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, return ret; } -bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev) +int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type) { u64 a0 = CMD_NIC_CFG, a1 = 0; - u64 rss_hash_type; int wait = 1000; int err; err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait); - if (err || !a0) - return false; + /* rss_hash_type is valid only when a0 is 1. Adapter which does not + * support CMD_CAPABILITY for rss_hash_type has a0 = 0 + */ + if (err || (a0 != 1)) + return -EOPNOTSUPP; + + a1 = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) & + NIC_CFG_RSS_HASH_TYPE_MASK_FIELD; - rss_hash_type = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) & - NIC_CFG_RSS_HASH_TYPE_MASK_FIELD; + *rss_hash_type = (u8)a1; - return (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP); + return 0; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h index 59d4cc8fbb85..714fc1ed79e3 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.h +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h @@ -184,6 +184,6 @@ int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay, u16 vxlan_udp_port_number); int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature, u64 *supported_versions, u64 *a1); -bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev); +int vnic_dev_capable_rss_hash_type(struct vnic_dev *vdev, u8 *rss_hash_type); #endif /* _VNIC_DEV_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index 41de4ba622a1..fef5a0a0663d 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -148,8 +148,26 @@ enum vnic_devcmd_cmd { /* del VLAN id in (u16)a0 */ CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15), - /* nic_cfg in (u32)a0 */ + /* nic_cfg (no wait, always succeeds) + * in: (u32)a0 + * + * Capability query: + * out: (u64) a0 = 1 if a1 is valid + * (u64) a1 = (NIC_CFG bits supported) | (flags << 32) + * + * flags are CMD_NIC_CFG_CAPF_xxx + */ CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), + /* nic_cfg_chk (will return error if flags are invalid) + * in: (u32)a0 + * + * Capability query: + * out: (u64) a0 = 1 if a1 is valid + * (u64) a1 = (NIC_CFG bits supported) | (flags << 32) + * + * flags are CMD_NIC_CFG_CAPF_xxx + */ + CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16), /* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */ CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17), diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h index 5a93db0d7afc..84ff8ca17fcb 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_nic.h +++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h @@ -41,13 +41,14 @@ #define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL #define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24 +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0) #define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2) #define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4) #define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5) #define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6) -#define NIC_CFG_RSS_HASH_TYPE_UDP (1 << 7) +#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7) 
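
The enic conversion above changes the capability query from a boolean (UDP RSS yes/no) to returning the full supported hash-type byte, with -EOPNOTSUPP when the adapter cannot answer. A condensed caller sketch following the enic_set_rss_nic_cfg() pattern (vdev assumed in scope, devcmd locking elided):

	u8 rss_hash_type;

	if (vnic_dev_capable_rss_hash_type(vdev, &rss_hash_type)) {
		/* old adapter: fall back to the legacy default set */
		rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
				NIC_CFG_RSS_HASH_TYPE_IPV6 |
				NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	}

	bool udp4_rss = rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
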
static inline void vnic_set_nic_cfg(u32 *nic_cfg, u8 rss_default_cpu, u8 rss_hash_type, diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index bd3f6e4d1341..ff9eb45f67f8 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -539,7 +539,7 @@ static int gmac_setup_txqs(struct net_device *netdev) } if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "TX queue base it not aligned\n"); + dev_warn(geth->dev, "TX queue base is not aligned\n"); kfree(skb_tab); return -ENOMEM; } @@ -680,7 +680,7 @@ static int gmac_setup_rxq(struct net_device *netdev) if (!port->rxq_ring) return -ENOMEM; if (port->rxq_dma_base & ~NONTOE_QHDR0_BASE_MASK) { - dev_warn(geth->dev, "RX queue base it not aligned\n"); + dev_warn(geth->dev, "RX queue base is not aligned\n"); return -ENOMEM; } @@ -905,7 +905,7 @@ static int geth_setup_freeq(struct gemini_ethernet *geth) if (!geth->freeq_ring) return -ENOMEM; if (geth->freeq_dma_base & ~DMA_Q_BASE_MASK) { - dev_warn(geth->dev, "queue ring base it not aligned\n"); + dev_warn(geth->dev, "queue ring base is not aligned\n"); goto err_freeq; } diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 8bb0db990c8f..00a57273b753 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1246,8 +1246,7 @@ error: mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); free2: - if (priv->clk) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); free: free_netdev(netdev); out: @@ -1271,8 +1270,7 @@ static int ethoc_remove(struct platform_device *pdev) mdiobus_unregister(priv->mdio); mdiobus_free(priv->mdio); } - if (priv->clk) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); unregister_netdev(netdev); free_netdev(netdev); } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 6e490fd2345d..a580a3dcbe59 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -22,7 +22,7 @@ if NET_VENDOR_FREESCALE config FEC tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ - ARCH_MXC || SOC_IMX28) + ARCH_MXC || SOC_IMX28 || COMPILE_TEST) default ARCH_MXC || SOC_IMX28 if ARM select PHYLIB imply PTP_1588_CLOCK diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index ed8ad0fefbda..0914a3ea4405 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -14,7 +14,6 @@ obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o -obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index e7381f8ef89d..4778b663653e 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -21,7 +21,7 @@ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models diff 
--git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9d3eed46830d..c729665107f5 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2053,13 +2053,9 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->parent = &pdev->dev; node = of_get_child_by_name(pdev->dev.of_node, "mdio"); - if (node) { - err = of_mdiobus_register(fep->mii_bus, node); + err = of_mdiobus_register(fep->mii_bus, node); + if (node) of_node_put(node); - } else { - err = mdiobus_register(fep->mii_bus); - } - if (err) goto err_out_free_mdiobus; @@ -2112,7 +2108,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ - defined(CONFIG_ARM64) + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -3518,7 +3514,7 @@ fec_probe(struct platform_device *pdev) goto failed_init; for (i = 0; i < irq_cnt; i++) { - sprintf(irq_name, "int%d", i); + snprintf(irq_name, sizeof(irq_name), "int%d", i); irq = platform_get_irq_byname(pdev, irq_name); if (irq < 0) irq = platform_get_irq(pdev, i); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 43d973215040..36c2d7d6ee1b 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -454,12 +454,6 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -/** - * fec_ptp_hwtstamp_ioctl - control hardware time stamping - * @ndev: pointer to net_device - * @ifreq: ioctl data - * @cmd: particular ioctl requested - */ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 6552d68ea6e1..ce6e24c74978 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1391,12 +1391,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) /* FM_WRONG_RESET_VALUES_ERRATA_FMAN_A005127 Errata * workaround */ - if (port->rev_info.major >= 6) { - u32 reg; + u32 reg; - reg = 0x00001013; - iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); - } + reg = 0x00001013; + iowrite32be(reg, &port->bmi_regs->tx.fmbm_tfp); } return 0; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 5aa814799d70..8e42c0246611 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1372,7 +1372,4 @@ struct filer_table { struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; }; -/* The gianfar_ptp module will set this variable */ -extern int gfar_phc_index; - #endif /* __GIANFAR_H */ diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index a93e0199c369..8cb98cae0a6f 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -41,6 +41,8 @@ #include <linux/phy.h> #include <linux/sort.h> #include <linux/if_vlan.h> +#include 
<linux/of_platform.h> +#include <linux/fsl/ptp_qoriq.h> #include "gianfar.h" @@ -1509,24 +1511,35 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -int gfar_phc_index = -1; -EXPORT_SYMBOL(gfar_phc_index); - static int gfar_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); + struct platform_device *ptp_dev; + struct device_node *ptp_node; + struct qoriq_ptp *ptp = NULL; + + info->phc_index = -1; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } + + ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp"); + if (ptp_node) { + ptp_dev = of_find_device_by_node(ptp_node); + if (ptp_dev) + ptp = platform_get_drvdata(ptp_dev); + } + + if (ptp) + info->phc_index = ptp->phc_index; + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = gfar_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c deleted file mode 100644 index 9f8d4f8e57e3..000000000000 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * PTP 1588 clock using the eTSEC - * - * Copyright (C) 2010 OMICRON electronics GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
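
The reworked gfar_get_ts_info() above drops the gfar_phc_index global that this (now deleted) gianfar_ptp.c used to export, and instead resolves the PHC index on demand. The lookup pattern, condensed (node reference handling elided, as in the hunk):

	struct device_node *np;
	struct platform_device *ptp_dev;
	struct qoriq_ptp *ptp = NULL;

	np = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
	if (np) {
		ptp_dev = of_find_device_by_node(np);
		if (ptp_dev)
			ptp = platform_get_drvdata(ptp_dev);
	}
	info->phc_index = ptp ? ptp->phc_index : -1;
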
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/device.h> -#include <linux/hrtimer.h> -#include <linux/interrupt.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <linux/timex.h> -#include <linux/io.h> - -#include <linux/ptp_clock_kernel.h> - -#include "gianfar.h" - -/* - * gianfar ptp registers - * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 - */ -struct gianfar_ptp_registers { - u32 tmr_ctrl; /* Timer control register */ - u32 tmr_tevent; /* Timestamp event register */ - u32 tmr_temask; /* Timer event mask register */ - u32 tmr_pevent; /* Timestamp event register */ - u32 tmr_pemask; /* Timer event mask register */ - u32 tmr_stat; /* Timestamp status register */ - u32 tmr_cnt_h; /* Timer counter high register */ - u32 tmr_cnt_l; /* Timer counter low register */ - u32 tmr_add; /* Timer drift compensation addend register */ - u32 tmr_acc; /* Timer accumulator register */ - u32 tmr_prsc; /* Timer prescale */ - u8 res1[4]; - u32 tmroff_h; /* Timer offset high */ - u32 tmroff_l; /* Timer offset low */ - u8 res2[8]; - u32 tmr_alarm1_h; /* Timer alarm 1 high register */ - u32 tmr_alarm1_l; /* Timer alarm 1 high register */ - u32 tmr_alarm2_h; /* Timer alarm 2 high register */ - u32 tmr_alarm2_l; /* Timer alarm 2 high register */ - u8 res3[48]; - u32 tmr_fiper1; /* Timer fixed period interval */ - u32 tmr_fiper2; /* Timer fixed period interval */ - u32 tmr_fiper3; /* Timer fixed period interval */ - u8 res4[20]; - u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ - u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ -}; - -/* Bit definitions for the TMR_CTRL register */ -#define ALM1P (1<<31) /* Alarm1 output polarity */ -#define ALM2P (1<<30) /* Alarm2 output polarity */ -#define FIPERST (1<<28) /* FIPER start indication */ -#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ -#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ -#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ -#define TCLK_PERIOD_MASK (0x3ff) -#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ -#define FRD (1<<14) /* FIPER Realignment Disable */ -#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ -#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ -#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ -#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ -#define COPH (1<<7) /* Generated clock output phase. */ -#define CIPH (1<<6) /* External oscillator input clock phase */ -#define TMSR (1<<5) /* Timer soft reset. */ -#define BYP (1<<3) /* Bypass drift compensated clock */ -#define TE (1<<2) /* 1588 timer enable. 
*/ -#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ -#define CKSEL_MASK (0x3) - -/* Bit definitions for the TMR_TEVENT register */ -#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ -#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ -#define ALM2 (1<<17) /* Current time = alarm time register 2 */ -#define ALM1 (1<<16) /* Current time = alarm time register 1 */ -#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ -#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ -#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ - -/* Bit definitions for the TMR_TEMASK register */ -#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ -#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ -#define ALM2EN (1<<17) /* Timer ALM2 event enable */ -#define ALM1EN (1<<16) /* Timer ALM1 event enable */ -#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ -#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ - -/* Bit definitions for the TMR_PEVENT register */ -#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ -#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ -#define RXP (1<<0) /* PTP frame has been received */ - -/* Bit definitions for the TMR_PEMASK register */ -#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ -#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ -#define RXPEN (1<<0) /* Receive PTP packet event enable */ - -/* Bit definitions for the TMR_STAT register */ -#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ -#define STAT_VEC_MASK (0x3f) - -/* Bit definitions for the TMR_PRSC register */ -#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ -#define PRSC_OCK_MASK (0xffff) - - -#define DRIVER "gianfar_ptp" -#define DEFAULT_CKSEL 1 -#define N_EXT_TS 2 -#define REG_SIZE sizeof(struct gianfar_ptp_registers) - -struct etsects { - struct gianfar_ptp_registers __iomem *regs; - spinlock_t lock; /* protects regs */ - struct ptp_clock *clock; - struct ptp_clock_info caps; - struct resource *rsrc; - int irq; - u64 alarm_interval; /* for periodic alarm */ - u64 alarm_value; - u32 tclk_period; /* nanoseconds */ - u32 tmr_prsc; - u32 tmr_add; - u32 cksel; - u32 tmr_fiper1; - u32 tmr_fiper2; -}; - -/* - * Register access functions - */ - -/* Caller must hold etsects->lock. */ -static u64 tmr_cnt_read(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - lo = gfar_read(&etsects->regs->tmr_cnt_l); - hi = gfar_read(&etsects->regs->tmr_cnt_h); - ns = ((u64) hi) << 32; - ns |= lo; - return ns; -} - -/* Caller must hold etsects->lock. */ -static void tmr_cnt_write(struct etsects *etsects, u64 ns) -{ - u32 hi = ns >> 32; - u32 lo = ns & 0xffffffff; - - gfar_write(&etsects->regs->tmr_cnt_l, lo); - gfar_write(&etsects->regs->tmr_cnt_h, hi); -} - -/* Caller must hold etsects->lock. */ -static void set_alarm(struct etsects *etsects) -{ - u64 ns; - u32 lo, hi; - - ns = tmr_cnt_read(etsects) + 1500000000ULL; - ns = div_u64(ns, 1000000000UL) * 1000000000ULL; - ns -= etsects->tclk_period; - hi = ns >> 32; - lo = ns & 0xffffffff; - gfar_write(&etsects->regs->tmr_alarm1_l, lo); - gfar_write(&etsects->regs->tmr_alarm1_h, hi); -} - -/* Caller must hold etsects->lock. 
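
In ptp_gianfar_adjfine() a little further down in this removed file, the frequency adjustment scales the addend by scaled_ppm, which is ppm * 2^16. Since 65536 * 1000000 = 8000000 * 2^13, the division is done as one div_u64() plus a shift, with the bit below the result used to round to nearest (variable names as in the function):

	u64 adj = (u64)tmr_add * scaled_ppm;	/* scaled_ppm made >= 0 first */
	u64 diff = div_u64(adj, 8000000);
	diff = (diff >> 13) + ((diff >> 12) & 1);	/* /2^13, rounded */
	tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
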
*/ -static void set_fipers(struct etsects *etsects) -{ - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); -} - -/* - * Interrupt service routine - */ - -static irqreturn_t isr(int irq, void *priv) -{ - struct etsects *etsects = priv; - struct ptp_clock_event event; - u64 ns; - u32 ack = 0, lo, hi, mask, val; - - val = gfar_read(&etsects->regs->tmr_tevent); - - if (val & ETS1) { - ack |= ETS1; - hi = gfar_read(&etsects->regs->tmr_etts1_h); - lo = gfar_read(&etsects->regs->tmr_etts1_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ETS2) { - ack |= ETS2; - hi = gfar_read(&etsects->regs->tmr_etts2_h); - lo = gfar_read(&etsects->regs->tmr_etts2_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(etsects->clock, &event); - } - - if (val & ALM2) { - ack |= ALM2; - if (etsects->alarm_value) { - event.type = PTP_CLOCK_ALARM; - event.index = 0; - event.timestamp = etsects->alarm_value; - ptp_clock_event(etsects->clock, &event); - } - if (etsects->alarm_interval) { - ns = etsects->alarm_value + etsects->alarm_interval; - hi = ns >> 32; - lo = ns & 0xffffffff; - spin_lock(&etsects->lock); - gfar_write(&etsects->regs->tmr_alarm2_l, lo); - gfar_write(&etsects->regs->tmr_alarm2_h, hi); - spin_unlock(&etsects->lock); - etsects->alarm_value = ns; - } else { - gfar_write(&etsects->regs->tmr_tevent, ALM2); - spin_lock(&etsects->lock); - mask = gfar_read(&etsects->regs->tmr_temask); - mask &= ~ALM2EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock(&etsects->lock); - etsects->alarm_value = 0; - etsects->alarm_interval = 0; - } - } - - if (val & PP1) { - ack |= PP1; - event.type = PTP_CLOCK_PPS; - ptp_clock_event(etsects->clock, &event); - } - - if (ack) { - gfar_write(&etsects->regs->tmr_tevent, ack); - return IRQ_HANDLED; - } else - return IRQ_NONE; -} - -/* - * PTP clock operations - */ - -static int ptp_gianfar_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) -{ - u64 adj, diff; - u32 tmr_add; - int neg_adj = 0; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - if (scaled_ppm < 0) { - neg_adj = 1; - scaled_ppm = -scaled_ppm; - } - tmr_add = etsects->tmr_add; - adj = tmr_add; - - /* calculate diff as adj*(scaled_ppm/65536)/1000000 - * and round() to the nearest integer - */ - adj *= scaled_ppm; - diff = div_u64(adj, 8000000); - diff = (diff >> 13) + ((diff >> 12) & 1); - - tmr_add = neg_adj ? 
- - gfar_write(&etsects->regs->tmr_add, tmr_add); - - return 0; -} - -static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - s64 now; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - now = tmr_cnt_read(etsects); - now += delta; - tmr_cnt_write(etsects, now); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - spin_lock_irqsave(&etsects->lock, flags); - - ns = tmr_cnt_read(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -static int ptp_gianfar_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - u64 ns; - unsigned long flags; - struct etsects *etsects = container_of(ptp, struct etsects, caps); - - ns = timespec64_to_ns(ts); - - spin_lock_irqsave(&etsects->lock, flags); - - tmr_cnt_write(etsects, ns); - set_fipers(etsects); - - spin_unlock_irqrestore(&etsects->lock, flags); - - return 0; -} - -static int ptp_gianfar_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) -{ - struct etsects *etsects = container_of(ptp, struct etsects, caps); - unsigned long flags; - u32 bit, mask; - - switch (rq->type) { - case PTP_CLK_REQ_EXTTS: - switch (rq->extts.index) { - case 0: - bit = ETS1EN; - break; - case 1: - bit = ETS2EN; - break; - default: - return -EINVAL; - } - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= bit; - else - mask &= ~bit; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - case PTP_CLK_REQ_PPS: - spin_lock_irqsave(&etsects->lock, flags); - mask = gfar_read(&etsects->regs->tmr_temask); - if (on) - mask |= PP1EN; - else - mask &= ~PP1EN; - gfar_write(&etsects->regs->tmr_temask, mask); - spin_unlock_irqrestore(&etsects->lock, flags); - return 0; - - default: - break; - } - - return -EOPNOTSUPP; -} - -static const struct ptp_clock_info ptp_gianfar_caps = { - .owner = THIS_MODULE, - .name = "gianfar clock", - .max_adj = 512000, - .n_alarm = 0, - .n_ext_ts = N_EXT_TS, - .n_per_out = 0, - .n_pins = 0, - .pps = 1, - .adjfine = ptp_gianfar_adjfine, - .adjtime = ptp_gianfar_adjtime, - .gettime64 = ptp_gianfar_gettime, - .settime64 = ptp_gianfar_settime, - .enable = ptp_gianfar_enable, -}; - -static int gianfar_ptp_probe(struct platform_device *dev) -{ - struct device_node *node = dev->dev.of_node; - struct etsects *etsects; - struct timespec64 now; - int err = -ENOMEM; - u32 tmr_ctrl; - unsigned long flags; - - etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); - if (!etsects) - goto no_memory; - - err = -ENODEV; - - etsects->caps = ptp_gianfar_caps; - - if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel)) - etsects->cksel = DEFAULT_CKSEL; - - if (of_property_read_u32(node, - "fsl,tclk-period", &etsects->tclk_period) || - of_property_read_u32(node, - "fsl,tmr-prsc", &etsects->tmr_prsc) || - of_property_read_u32(node, - "fsl,tmr-add", &etsects->tmr_add) || - of_property_read_u32(node, - "fsl,tmr-fiper1", &etsects->tmr_fiper1) || - of_property_read_u32(node, - "fsl,tmr-fiper2", &etsects->tmr_fiper2) || - of_property_read_u32(node, - "fsl,max-adj", &etsects->caps.max_adj)) { - 
pr_err("device tree node missing required elements\n"); - goto no_node; - } - - etsects->irq = platform_get_irq(dev, 0); - - if (etsects->irq < 0) { - pr_err("irq not in device tree\n"); - goto no_node; - } - if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { - pr_err("request_irq failed\n"); - goto no_node; - } - - etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!etsects->rsrc) { - pr_err("no resource\n"); - goto no_resource; - } - if (request_resource(&iomem_resource, etsects->rsrc)) { - pr_err("resource busy\n"); - goto no_resource; - } - - spin_lock_init(&etsects->lock); - - etsects->regs = ioremap(etsects->rsrc->start, - resource_size(etsects->rsrc)); - if (!etsects->regs) { - pr_err("ioremap ptp registers failed\n"); - goto no_ioremap; - } - getnstimeofday64(&now); - ptp_gianfar_settime(&etsects->caps, &now); - - tmr_ctrl = - (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | - (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; - - spin_lock_irqsave(&etsects->lock, flags); - - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); - gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); - gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); - gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); - gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); - set_alarm(etsects); - gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); - - spin_unlock_irqrestore(&etsects->lock, flags); - - etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev); - if (IS_ERR(etsects->clock)) { - err = PTR_ERR(etsects->clock); - goto no_clock; - } - gfar_phc_index = ptp_clock_index(etsects->clock); - - platform_set_drvdata(dev, etsects); - - return 0; - -no_clock: - iounmap(etsects->regs); -no_ioremap: - release_resource(etsects->rsrc); -no_resource: - free_irq(etsects->irq, etsects); -no_node: - kfree(etsects); -no_memory: - return err; -} - -static int gianfar_ptp_remove(struct platform_device *dev) -{ - struct etsects *etsects = platform_get_drvdata(dev); - - gfar_write(&etsects->regs->tmr_temask, 0); - gfar_write(&etsects->regs->tmr_ctrl, 0); - - gfar_phc_index = -1; - ptp_clock_unregister(etsects->clock); - iounmap(etsects->regs); - release_resource(etsects->rsrc); - free_irq(etsects->irq, etsects); - kfree(etsects); - - return 0; -} - -static const struct of_device_id match_table[] = { - { .compatible = "fsl,etsec-ptp" }, - {}, -}; -MODULE_DEVICE_TABLE(of, match_table); - -static struct platform_driver gianfar_ptp_driver = { - .driver = { - .name = "gianfar_ptp", - .of_match_table = match_table, - }, - .probe = gianfar_ptp_probe, - .remove = gianfar_ptp_remove, -}; - -module_platform_driver(gianfar_ptp_driver); - -MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); -MODULE_DESCRIPTION("PTP clock using the eTSEC"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index e0bc79ea3d88..85e1d14514fc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1648,6 +1648,15 @@ int hns_dsaf_rm_mac_addr( mac_entry->addr); } +static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev, + u8 port_num, u8 *mask, u8 *addr) +{ + if (MAC_IS_BROADCAST(addr)) + memset(mask, 0xff, ETH_ALEN); + else + memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN); +} + static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src) { u16 *a = (u16 *)dst; @@ -1676,7 +1685,6 @@ int 
hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key tmp_mac_key; struct dsaf_tbl_tcam_data tcam_data; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; int mskid; /*chechk mac addr */ @@ -1687,9 +1695,12 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, } ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ @@ -1844,7 +1855,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, struct dsaf_drv_tbl_tcam_key mask_key, tmp_mac_key; struct dsaf_tbl_tcam_data *pmask_key = NULL; u8 mc_addr[ETH_ALEN]; - u8 *mc_mask; if (!(void *)mac_entry) { dev_err(dsaf_dev->dev, @@ -1861,14 +1871,17 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /* always mask vlan_id field */ ether_addr_copy(mc_addr, mac_entry->addr); - mc_mask = dsaf_dev->mac_cb[mac_entry->in_port_num]->mc_mask; if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + u8 mc_mask[ETH_ALEN]; + /* prepare for key data setting */ + hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num, + mc_mask, mac_entry->addr); hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask); /* config key mask */ - hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_addr); + hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); mask_key.high.val = le32_to_cpu(mask_key.high.val); mask_key.low.val = le32_to_cpu(mask_key.low.val); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 519e2bd6aa60..be9dc08ccf67 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -47,6 +47,8 @@ enum hclge_mbx_mac_vlan_subcode { HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */ HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, /* config func MTA enable */ + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, /* read func MTA type */ + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, /* update MTA status */ }; /* below are per-VF vlan cfg subcodes */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 02145f2de820..9d79dad2c6aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -36,6 +36,49 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, return false; } +static void hnae3_set_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, int inited) +{ + switch (client->type) { + case HNAE3_CLIENT_KNIC: + hnae_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_UNIC: + hnae_set_bit(ae_dev->flag, HNAE3_UNIC_CLIENT_INITED_B, inited); + break; + case HNAE3_CLIENT_ROCE: + hnae_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited); + break; + default: + break; + } +} + +static int hnae3_get_client_init_flag(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + int inited = 0; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_KNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_UNIC: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_UNIC_CLIENT_INITED_B); + break; + case HNAE3_CLIENT_ROCE: + inited = hnae_get_bit(ae_dev->flag, + HNAE3_ROCE_CLIENT_INITED_B); + 
break; + default: + break; + } + + return inited; +} + static int hnae3_match_n_instantiate(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, bool is_reg) { @@ -50,13 +93,22 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, /* now, (un-)instantiate client by calling lower layer */ if (is_reg) { ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) + if (ret) { dev_err(&ae_dev->pdev->dev, "fail to instantiate client\n"); - return ret; + return ret; + } + + hnae3_set_client_init_flag(client, ae_dev, 1); + return 0; + } + + if (hnae3_get_client_init_flag(client, ae_dev)) { + ae_dev->ops->uninit_client_instance(client, ae_dev); + + hnae3_set_client_init_flag(client, ae_dev, 0); } - ae_dev->ops->uninit_client_instance(client, ae_dev); return 0; } @@ -89,7 +141,7 @@ int hnae3_register_client(struct hnae3_client *client) exit: mutex_unlock(&hnae3_common_lock); - return ret; + return 0; } EXPORT_SYMBOL(hnae3_register_client); @@ -112,7 +164,7 @@ EXPORT_SYMBOL(hnae3_unregister_client); * @ae_algo: AE algorithm * NOTE: the duplicated name will not be checked */ -int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) { const struct pci_device_id *id; struct hnae3_ae_dev *ae_dev; @@ -151,8 +203,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) } mutex_unlock(&hnae3_common_lock); - - return ret; } EXPORT_SYMBOL(hnae3_register_ae_algo); @@ -168,6 +218,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!id) continue; @@ -191,22 +244,14 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); * @ae_dev: the AE device * NOTE: the duplicated name will not be checked */ -int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) { const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0, lock_acquired; + int ret = 0; - /* we can get deadlocked if SRIOV is being enabled in context to probe - * and probe gets called again in same context. This can happen when - * pci_enable_sriov() is called to create VFs from PF probes context. - * Therefore, for simplicity uniformly defering further probing in all - * cases where we detect contention. - */ - lock_acquired = mutex_trylock(&hnae3_common_lock); - if (!lock_acquired) - return -EPROBE_DEFER; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); @@ -220,7 +265,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!ae_dev->ops) { dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); - ret = -EOPNOTSUPP; goto out_err; } @@ -247,8 +291,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) out_err: mutex_unlock(&hnae3_common_lock); - - return ret; } EXPORT_SYMBOL(hnae3_register_ae_dev); @@ -264,6 +306,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + if (!hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)) + continue; + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); if (!id) continue; @@ -283,3 +328,4 @@ EXPORT_SYMBOL(hnae3_unregister_ae_dev); MODULE_AUTHOR("Huawei Tech. 
Co., Ltd."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework"); +MODULE_VERSION(HNAE3_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 37ec1b3286c6..8acb1d116a02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -36,6 +36,8 @@ #include <linux/pci.h> #include <linux/types.h> +#define HNAE3_MOD_VERSION "1.0" + /* Device IDs */ #define HNAE3_DEV_ID_GE 0xA220 #define HNAE3_DEV_ID_25GE 0xA221 @@ -52,6 +54,9 @@ #define HNAE3_DEV_INITED_B 0x0 #define HNAE3_DEV_SUPPORT_ROCE_B 0x1 #define HNAE3_DEV_SUPPORT_DCB_B 0x2 +#define HNAE3_KNIC_CLIENT_INITED_B 0x3 +#define HNAE3_UNIC_CLIENT_INITED_B 0x4 +#define HNAE3_ROCE_CLIENT_INITED_B 0x5 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -273,10 +278,6 @@ struct hnae3_ae_dev { * Map rings to vector * unmap_ring_from_vector() * Unmap rings from vector - * add_tunnel_udp() - * Add tunnel information to hardware - * del_tunnel_udp() - * Delete tunnel information from hardware * reset_queue() * Reset queue * get_fw_version() @@ -315,7 +316,8 @@ struct hnae3_ae_ops { int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); - void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc); int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); void (*get_pauseparam)(struct hnae3_handle *handle, @@ -351,6 +353,7 @@ struct hnae3_ae_ops { const unsigned char *addr); int (*rm_mc_addr)(struct hnae3_handle *handle, const unsigned char *addr); + int (*update_mta_status)(struct hnae3_handle *handle); void (*set_tso_stats)(struct hnae3_handle *handle, int enable); void (*update_stats)(struct hnae3_handle *handle, @@ -388,9 +391,6 @@ struct hnae3_ae_ops { int vector_num, struct hnae3_ring_chain_node *vr_chain); - int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); - int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); - void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); u32 (*get_fw_version)(struct hnae3_handle *handle); void (*get_mdix_mode)(struct hnae3_handle *handle, @@ -521,11 +521,11 @@ struct hnae3_handle { #define hnae_get_bit(origin, shift) \ hnae_get_field((origin), (0x1 << (shift)), (shift)) -int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); -int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); +void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); void hnae3_unregister_client(struct hnae3_client *client); int hnae3_register_client(struct hnae3_client *client); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 8c55965a66ac..f2b31d278bc9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -25,6 +25,9 @@ #include "hnae3.h" #include "hns3_enet.h" +static void hns3_clear_all_ring(struct hnae3_handle *h); +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); + static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; static const char hns3_driver_string[] = @@ -273,6 +276,10 @@ static int 
hns3_nic_net_up(struct net_device *netdev) int i, j; int ret; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + /* get irq resource for all vectors */ ret = hns3_nic_init_irq(priv); if (ret) { @@ -333,17 +340,19 @@ static void hns3_nic_net_down(struct net_device *netdev) if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) ops->stop(priv->ae_handle); - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - /* free irq resources */ hns3_nic_uninit_irq(priv); + + hns3_clear_all_ring(priv->ae_handle); } static int hns3_nic_net_stop(struct net_device *netdev) @@ -406,15 +415,21 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) if (h->ae_algo->ops->set_promisc_mode) { if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, 1); + h->ae_algo->ops->set_promisc_mode(h, true, true); + else if (netdev->flags & IFF_ALLMULTI) + h->ae_algo->ops->set_promisc_mode(h, false, true); else - h->ae_algo->ops->set_promisc_mode(h, 0); + h->ae_algo->ops->set_promisc_mode(h, false, false); } if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) + if (netdev->flags & IFF_MULTICAST) { if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) netdev_err(netdev, "sync mc address fail\n"); + + if (h->ae_algo->ops->update_mta_status) + h->ae_algo->ops->update_mta_status(h); + } } static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, @@ -502,7 +517,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, /* find outer header point */ l3.hdr = skb_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); + l4_hdr = skb_transport_header(skb); if (skb->protocol == htons(ETH_P_IPV6)) { exthdr = l3.hdr + sizeof(*l3.v6); @@ -644,6 +659,32 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, } } +/* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL + * and the packet is a UDP packet whose dest port is the IANA-assigned + * VXLAN port, the hardware is expected to do the checksum offload, + * but it does not do the checksum offload when the udp dest port is + * 4789. 
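
[Editor's note: the condition in hns3_tunnel_csum_bug() just below is written as a double negation; in positive form, the software-checksum fallback fires exactly when the packet is plain (non-encapsulated) UDP addressed to the VXLAN port. A sketch of the equivalent test, kernel-style, not compilable standalone:

	bool needs_sw_csum = !skb->encapsulation &&
			     l4.udp->dest == htons(IANA_VXLAN_PORT);
]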
+ */ +static bool hns3_tunnel_csum_bug(struct sk_buff *skb) +{ +#define IANA_VXLAN_PORT 4789 + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + + l4.hdr = skb_transport_header(skb); + + if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) + return false; + + skb_checksum_help(skb); + + return true; +} + static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) @@ -732,6 +773,9 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L4T_TCP); break; case IPPROTO_UDP: + if (hns3_tunnel_csum_bug(skb)) + break; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_M, HNS3_TXD_L4T_S, @@ -1121,6 +1165,12 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) return -EADDRNOTAVAIL; + if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", + mac_addr->sa_data); + return 0; + } + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); @@ -1244,93 +1294,6 @@ static void hns3_nic_get_stats64(struct net_device *netdev, stats->tx_compressed = netdev->stats.tx_compressed; } -static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (udp_tnl->used && udp_tnl->dst_port == port) { - udp_tnl->used++; - return; - } - - if (udp_tnl->used) { - netdev_warn(netdev, - "UDP tunnel [%d], port [%d] offload\n", type, port); - return; - } - - udp_tnl->dst_port = port; - udp_tnl->used = 1; - /* TBD send command to hardware to add port */ - if (h->ae_algo->ops->add_tunnel_udp) - h->ae_algo->ops->add_tunnel_udp(h, port); -} - -static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (!udp_tnl->used || udp_tnl->dst_port != port) { - netdev_warn(netdev, - "Invalid UDP tunnel port %d\n", port); - return; - } - - udp_tnl->used--; - if (udp_tnl->used) - return; - - udp_tnl->dst_port = 0; - /* TBD send command to hardware to del port */ - if (h->ae_algo->ops->del_tunnel_udp) - h->ae_algo->ops->del_tunnel_udp(h, port); -} - -/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports - * @netdev: This physical ports's netdev - * @ti: Tunnel information - */ -static void hns3_nic_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); - break; - } -} - -static void hns3_nic_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_del_tunnel_port(netdev, 
port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - break; - } -} - static int hns3_setup_tc(struct net_device *netdev, void *type_data) { struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; @@ -1569,13 +1532,50 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_get_stats64 = hns3_nic_get_stats64, .ndo_setup_tc = hns3_nic_setup_tc, .ndo_set_rx_mode = hns3_nic_set_rx_mode, - .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, - .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, }; +static bool hns3_is_phys_func(struct pci_dev *pdev) +{ + u32 dev_id = pdev->device; + + switch (dev_id) { + case HNAE3_DEV_ID_GE: + case HNAE3_DEV_ID_25GE: + case HNAE3_DEV_ID_25GE_RDMA: + case HNAE3_DEV_ID_25GE_RDMA_MACSEC: + case HNAE3_DEV_ID_50GE_RDMA: + case HNAE3_DEV_ID_50GE_RDMA_MACSEC: + case HNAE3_DEV_ID_100G_RDMA_MACSEC: + return true; + case HNAE3_DEV_ID_100G_VF: + case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: + return false; + default: + dev_warn(&pdev->dev, "un-recognized pci device-id %d", + dev_id); + } + + return false; +} + +static void hns3_disable_sriov(struct pci_dev *pdev) +{ + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(pdev)) { + dev_warn(&pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(pdev); +} + /* hns3_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in hns3_pci_tbl @@ -1603,7 +1603,9 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ae_dev->dev_type = HNAE3_DEV_KNIC; pci_set_drvdata(pdev, ae_dev); - return hnae3_register_ae_dev(ae_dev); + hnae3_register_ae_dev(ae_dev); + + return 0; } /* hns3_remove - Device removal routine @@ -1613,21 +1615,56 @@ static void hns3_remove(struct pci_dev *pdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV)) + hns3_disable_sriov(pdev); + hnae3_unregister_ae_dev(ae_dev); } +/** + * hns3_pci_sriov_configure + * @pdev: pointer to a pci_dev structure + * @num_vfs: number of VFs to allocate + * + * Enable or change the number of VFs. Called when the user updates the number + * of VFs in sysfs. 
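
[Editor's note: with the .sriov_configure callback wired up (hns3_pci_sriov_configure just below), VF creation and teardown are driven entirely from userspace through the PCI core's sriov_numvfs sysfs attribute, e.g. (device address hypothetical):

echo 4 > /sys/bus/pci/devices/0000:7d:00.0/sriov_numvfs
echo 0 > /sys/bus/pci/devices/0000:7d:00.0/sriov_numvfs

Returning num_vfs from the callback reports success to the PCI core; returning 0 acknowledges teardown.]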
+ **/ +static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + int ret; + + if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) { + dev_warn(&pdev->dev, "Can not config SRIOV\n"); + return -EINVAL; + } + + if (num_vfs) { + ret = pci_enable_sriov(pdev, num_vfs); + if (ret) + dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); + else + return num_vfs; + } else if (!pci_vfs_assigned(pdev)) { + pci_disable_sriov(pdev); + } else { + dev_warn(&pdev->dev, + "Unable to free VFs because some are assigned to VMs.\n"); + } + + return 0; +} + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .sriov_configure = hns3_pci_sriov_configure, }; /* set default feature to hns3 */ static void hns3_set_default_feature(struct net_device *netdev) { - struct hnae3_handle *h = hns3_get_handle(netdev); - netdev->priv_flags |= IFF_UNICAST_FLT; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1656,15 +1693,11 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; - - if (!(h->flags & HNAE3_SUPPORT_VF)) - netdev->hw_features |= - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -1836,6 +1869,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) @@ -1843,6 +1877,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc_cb[i].reuse_flag = 0; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); + ring->desc[i].rx.bd_base_info = 0; } static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, @@ -1971,106 +2006,6 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); } -/* hns3_nic_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
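
[Editor's note: the driver-private hns3_nic_get_headlen() removed here is superseded later in this patch by the generic eth_get_headlen() in hns3_handle_rx_bd(), which delegates header-length estimation to the kernel flow dissector instead of hand-rolled VLAN/IPv4/IPv6/TCP/UDP parsing. The RX pattern it serves, roughly (sketch; the two-argument eth_get_headlen() matches this kernel generation, later kernels add a net_device argument):

	/* copy only the protocol headers into the skb linear area; the
	 * remaining payload stays in the page and is attached as a frag
	 */
	pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
]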
- */ -static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, - unsigned int max_size) -{ - unsigned char *network; - u8 hlen; - - /* This should never happen, but better safe than sorry */ - if (max_size < ETH_HLEN) - return max_size; - - /* Initialize network frame pointer */ - network = data; - - /* Set first protocol and move network header forward */ - network += ETH_HLEN; - - /* Handle any vlan tag if present */ - if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) - == HNS3_RX_FLAG_VLAN_PRESENT) { - if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) - return max_size; - - network += VLAN_HLEN; - } - - /* Handle L3 protocols */ - if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV4) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct iphdr))) - return max_size; - - /* Access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (network[0] & 0x0F) << 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return network - data; - - /* Record next protocol if header is present */ - } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV6) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct ipv6hdr))) - return max_size; - - /* Record next protocol */ - hlen = sizeof(struct ipv6hdr); - } else { - return network - data; - } - - /* Relocate pointer to start of L4 header */ - network += hlen; - - /* Finally sort out TCP/UDP */ - if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_TCP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct tcphdr))) - return max_size; - - /* Access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (network[12] & 0xF0) >> 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return network - data; - - network += hlen; - } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_UDP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct udphdr))) - return max_size; - - network += sizeof(struct udphdr); - } - - /* If everything has gone correctly network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. 
- */ - if ((typeof(max_size))(network - data) < max_size) - return network - data; - else - return max_size; -} - static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -2183,6 +2118,39 @@ static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) napi_gro_receive(&ring->tqp_vector->napi, skb); } +static u16 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, u32 l234info) +{ + struct pci_dev *pdev = ring->tqp->handle->pdev; + u16 vlan_tag; + + if (pdev->revision == 0x20) { + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return vlan_tag; + } + +#define HNS3_STRP_OUTER_VLAN 0x1 +#define HNS3_STRP_INNER_VLAN 0x2 + + switch (hnae_get_field(l234info, HNS3_RXD_STRP_TAGP_M, + HNS3_RXD_STRP_TAGP_S)) { + case HNS3_STRP_OUTER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + break; + case HNS3_STRP_INNER_VLAN: + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + break; + default: + vlan_tag = 0; + break; + } + + return vlan_tag; +} + static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb, int *out_bnum) { @@ -2202,9 +2170,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetch(desc); - length = le16_to_cpu(desc->rx.pkt_len); + length = le16_to_cpu(desc->rx.size); bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); /* Check valid BD */ if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) @@ -2238,22 +2205,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetchw(skb->data); - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. - */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); - if (!(vlan_tag & VLAN_VID_MASK)) - vlan_tag = le16_to_cpu(desc->rx.vlan_tag); - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - bnum = 1; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -2270,8 +2221,8 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->stats.seg_pkt_cnt++; u64_stats_update_end(&ring->syncp); - pull_len = hns3_nic_get_headlen(va, l234info, - HNS3_RX_HEAD_SIZE); + pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); @@ -2290,6 +2241,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, *out_bnum = bnum; + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. 
+ */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + vlan_tag = hns3_parse_vlan_tag(ring, desc, l234info); + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { netdev_err(netdev, "no valid bd,%016llx,%016llx\n", ((u64 *)desc)[0], ((u64 *)desc)[1]); @@ -3022,8 +2989,6 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) goto out_when_alloc_ring_memory; } - hns3_init_ring_hw(priv->ring_data[i].ring); - u64_stats_init(&priv->ring_data[i].ring->syncp); } @@ -3052,13 +3017,13 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) } /* Set mac addr if it is configured. or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev) +static void hns3_init_mac_addr(struct net_device *netdev, bool init) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; - if (h->ae_algo->ops->get_mac_addr) { + if (h->ae_algo->ops->get_mac_addr && init) { h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); ether_addr_copy(netdev->dev_addr, mac_addr_temp); } @@ -3075,6 +3040,15 @@ } +static void hns3_uninit_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr); +} + static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -3112,7 +3086,7 @@ static int hns3_client_init(struct hnae3_handle *handle) handle->kinfo.netdev = netdev; handle->priv = (void *)priv; - hns3_init_mac_addr(netdev); + hns3_init_mac_addr(netdev, true); hns3_set_default_feature(netdev); @@ -3185,6 +3159,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_force_clear_all_rx_ring(handle); + ret = hns3_nic_uninit_vector_data(priv); if (ret) netdev_err(netdev, "uninit vector error\n"); @@ -3201,6 +3177,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + free_netdev(netdev); } @@ -3298,9 +3276,76 @@ static void hns3_recover_hw_addr(struct net_device *ndev) hns3_nic_mc_sync(ndev, ha->addr); } -static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) +static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) { - dev_kfree_skb_any(skb); + while (ring->next_to_clean != ring->next_to_use) { + ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; + hns3_free_buffer_detach(ring, ring->next_to_clean); + ring_ptr_move_fw(ring, next_to_clean); + } +} + +static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) +{ + struct hns3_desc_cb res_cbs; + int ret; + + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, its memory has been + * freed in hns3_handle_rx_bd or will be freed by + * the stack, so we need to replace the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + /* if allocating a new buffer fails, exit directly + * and re-clear in the up flow. 
+ */ + netdev_warn(ring->tqp->handle->kinfo.netdev, + "reserve buffer map failed, ret = %d\n", + ret); + return ret; + } + hns3_replace_buffer(ring, ring->next_to_use, + &res_cbs); + } + ring_ptr_move_fw(ring, next_to_use); + } + + return 0; +} + +static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) +{ + while (ring->next_to_use != ring->next_to_clean) { + /* When a buffer is not reused, its memory has been + * freed in hns3_handle_rx_bd or will be freed by + * the stack, so we only need to unmap the buffer here. + */ + if (!ring->desc_cb[ring->next_to_use].reuse_flag) { + hns3_unmap_buffer(ring, + &ring->desc_cb[ring->next_to_use]); + ring->desc_cb[ring->next_to_use].dma = 0; + } + + ring_ptr_move_fw(ring, next_to_use); + } +} + +static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *ring; + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_force_clear_rx_ring(ring); + } } static void hns3_clear_all_ring(struct hnae3_handle *h) @@ -3314,14 +3359,55 @@ struct hns3_enet_ring *ring; ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, ring->desc_num); + hns3_clear_tx_ring(ring); dev_queue = netdev_get_tx_queue(ndev, priv->ring_data[i].queue_index); netdev_tx_reset_queue(dev_queue); ring = priv->ring_data[i + h->kinfo.num_tqps].ring; - hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); + /* Continue to clear other rings even if clearing some + * rings failed. + */ + hns3_clear_rx_ring(ring); + } } + +int hns3_nic_reset_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *rx_ring; + int i, j; + int ret; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->ae_algo->ops->reset_queue(h, i); + hns3_init_ring_hw(priv->ring_data[i].ring); + + /* We need to clear the tx ring here because the self test will + * use the ring and will not run down before up + */ + hns3_clear_tx_ring(priv->ring_data[i].ring); + priv->ring_data[i].ring->next_to_clean = 0; + priv->ring_data[i].ring->next_to_use = 0; + + rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_init_ring_hw(rx_ring); + ret = hns3_clear_rx_ring(rx_ring); + if (ret) + return ret; + + /* We cannot know the hardware head and tail when this + * function is called in the reset flow, so we reuse all desc. 
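
[Editor's note: the clear/reset helpers above all advance ring indices through ring_ptr_move_fw(); its definition in hns3_enet.h (quoted here for reference, assumed unchanged by this patch) is a simple wrap-around increment:

#define ring_ptr_move_fw(ring, p) \
	((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
]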
+ */ + for (j = 0; j < rx_ring->desc_num; j++) + hns3_reuse_buffer(rx_ring, j); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; } + + return 0; } static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) @@ -3359,7 +3445,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_init_mac_addr(netdev); + hns3_init_mac_addr(netdev, false); hns3_nic_set_rx_mode(netdev); hns3_recover_hw_addr(netdev); @@ -3393,7 +3479,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_clear_all_ring(handle); + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); if (ret) { @@ -3409,6 +3495,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) priv->ring_data = NULL; + hns3_uninit_mac_addr(netdev); + return ret; } @@ -3529,8 +3617,6 @@ int hns3_set_channels(struct net_device *netdev, if (if_running) hns3_nic_net_stop(netdev); - hns3_clear_all_ring(h); - ret = hns3_nic_uninit_vector_data(priv); if (ret) { dev_err(&netdev->dev, @@ -3600,6 +3686,8 @@ static int __init hns3_init_module(void) client.ops = &client_ops; + INIT_LIST_HEAD(&client.node); + ret = hnae3_register_client(&client); if (ret) return ret; @@ -3627,3 +3715,4 @@ MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); MODULE_AUTHOR("Huawei Tech. Co., Ltd."); MODULE_LICENSE("GPL"); MODULE_ALIAS("pci:hns-nic"); +MODULE_VERSION(HNS3_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 98cdbd3a1163..3b083d5ae9ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -14,6 +14,8 @@ #include "hnae3.h" +#define HNS3_MOD_VERSION "1.0" + extern const char hns3_driver_version[]; enum hns3_nic_state { @@ -102,6 +104,9 @@ enum hns3_nic_state { #define HNS3_RXD_L4ID_S 8 #define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) #define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_STRP_TAGP_S 13 +#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S) + #define HNS3_RXD_L2E_B 16 #define HNS3_RXD_L3E_B 17 #define HNS3_RXD_L4E_B 18 @@ -620,6 +625,7 @@ int hns3_set_channels(struct net_device *netdev, bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index eb3c34f3cf87..40c0425b4023 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -74,7 +74,7 @@ struct hns3_link_mode_mapping { u32 ethtool_link_mode; }; -static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en) { struct hnae3_handle *h = hns3_get_handle(ndev); int ret; @@ -85,11 +85,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) switch (loop) { case HNAE3_MAC_INTER_LOOP_MAC: - ret = h->ae_algo->ops->set_loopback(h, loop, true); - break; - case HNAE3_MAC_LOOP_NONE: - ret = h->ae_algo->ops->set_loopback(h, - HNAE3_MAC_INTER_LOOP_MAC, 
false); + ret = h->ae_algo->ops->set_loopback(h, loop, en); break; default: ret = -ENOTSUPP; @@ -99,10 +95,7 @@ if (ret) return ret; - if (loop == HNAE3_MAC_LOOP_NONE) - h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); - else - h->ae_algo->ops->set_promisc_mode(h, 1); + h->ae_algo->ops->set_promisc_mode(h, en, en); return ret; } @@ -115,6 +108,10 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) if (!h->ae_algo->ops->start) return -EOPNOTSUPP; + ret = hns3_nic_reset_all_ring(h); + if (ret) + return ret; + ret = h->ae_algo->ops->start(h); if (ret) { netdev_err(ndev, @@ -122,13 +119,13 @@ return ret; } - ret = hns3_lp_setup(ndev, loop_mode); + ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); return ret; } -static int hns3_lp_down(struct net_device *ndev) +static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { struct hnae3_handle *h = hns3_get_handle(ndev); int ret; @@ -136,7 +133,7 @@ if (!h->ae_algo->ops->stop) return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); + ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; @@ -332,7 +329,7 @@ static void hns3_self_test(struct net_device *ndev, data[test_index] = hns3_lp_up(ndev, loop_type); if (!data[test_index]) { data[test_index] = hns3_lp_run_test(ndev, loop_type); - hns3_lp_down(ndev); + hns3_lp_down(ndev, loop_type); } if (data[test_index]) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ff13d1876d9e..c36d64710fa6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -31,6 +31,17 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring) return ring->desc_num - used - 1; } +static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h) +{ + int u = ring->next_to_use; + int c = ring->next_to_clean; + + if (unlikely(h >= ring->desc_num)) + return 0; + + return u > c ? (h > c && h <= u) : (h > c || h <= u); +}
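
[Editor's note: is_valid_csq_clean_head() above treats the command submission queue as circular: a hardware-reported head h is plausible only within the half-open window (next_to_clean, next_to_use], modulo wrap-around. A standalone sketch with a few test cases, not driver code:

#include <stdio.h>

static int valid_head(int desc_num, int u, int c, int h)
{
	if (h >= desc_num)
		return 0;
	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

int main(void)
{
	printf("%d\n", valid_head(1024, 10, 5, 7));   /* 1: inside (5, 10]      */
	printf("%d\n", valid_head(1024, 10, 5, 20));  /* 0: beyond next_to_use  */
	printf("%d\n", valid_head(1024, 3, 1000, 2)); /* 1: window wraps past 0 */
	return 0;
}
]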
+ static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclge_desc); @@ -141,6 +152,7 @@ static void hclge_cmd_init_regs(struct hclge_hw *hw) static int hclge_cmd_csq_clean(struct hclge_hw *hw) { + struct hclge_dev *hdev = (struct hclge_dev *)hw->back; struct hclge_cmq_ring *csq = &hw->cmq.csq; u16 ntc = csq->next_to_clean; struct hclge_desc *desc; @@ -149,6 +161,13 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) desc = &csq->desc[ntc]; head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + rmb(); /* Make sure head is ready before touching any data */ + + if (!is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + return 0; + } while (head != ntc) { memset(desc, 0, sizeof(*desc)); @@ -171,7 +190,11 @@ static int hclge_cmd_csq_done(struct hclge_hw *hw) static bool hclge_is_special_opcode(u16 opcode) { - u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032}; + /* these commands have several descriptors, + * and use the first one to save opcode and return value + */ + u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -362,9 +385,9 @@ int hclge_cmd_init(struct hclge_dev *hdev) static void hclge_destroy_queue(struct hclge_cmq_ring *ring) { - spin_lock_bh(&ring->lock); + spin_lock(&ring->lock); hclge_free_cmd_desc(ring); - spin_unlock_bh(&ring->lock); + spin_unlock(&ring->lock); } void hclge_destroy_cmd_queue(struct hclge_hw *hw) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ee3cbac6dfaa..d9aaa76c76eb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -115,7 +115,6 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, - HCLGE_OPC_STATS_MAC_TRAFFIC = 0x0314, /* MACSEC command */ /* PFC/Pause CMD*/ @@ -484,6 +483,8 @@ struct hclge_promisc_param { u8 enable; }; +#define HCLGE_PROMISC_TX_EN_B BIT(4) +#define HCLGE_PROMISC_RX_EN_B BIT(5) #define HCLGE_PROMISC_EN_B 1 #define HCLGE_PROMISC_EN_ALL 0x7 #define HCLGE_PROMISC_EN_UC 0x1 @@ -704,11 +705,14 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; -#define HCLGE_ACCEPT_TAG_B 0 -#define HCLGE_ACCEPT_UNTAG_B 1 +#define HCLGE_ACCEPT_TAG1_B 0 +#define HCLGE_ACCEPT_UNTAG1_B 1 #define HCLGE_PORT_INS_TAG1_EN_B 2 #define HCLGE_PORT_INS_TAG2_EN_B 3 #define HCLGE_CFG_NIC_ROCE_SEL_B 4 +#define HCLGE_ACCEPT_TAG2_B 5 +#define HCLGE_ACCEPT_UNTAG2_B 6 + struct hclge_vport_vtag_tx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; @@ -813,21 +817,13 @@ struct hclge_reset_cmd { #define HCLGE_NIC_CMQ_DESC_NUM 1024 #define HCLGE_NIC_CMQ_DESC_NUM_S 3 -#define HCLGE_LED_PORT_SPEED_STATE_S 0 -#define HCLGE_LED_PORT_SPEED_STATE_M GENMASK(5, 0) -#define HCLGE_LED_ACTIVITY_STATE_S 0 -#define HCLGE_LED_ACTIVITY_STATE_M GENMASK(1, 0) -#define HCLGE_LED_LINK_STATE_S 0 -#define HCLGE_LED_LINK_STATE_M GENMASK(1, 0) #define HCLGE_LED_LOCATE_STATE_S 0 #define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0) struct hclge_set_led_state_cmd { - u8 port_speed_led_config; - u8 link_led_config; - u8 activity_led_config; + u8 rsv1[3]; u8 locate_led_config; - u8 rsv[20]; + u8 rsv2[20]; }; int hclge_cmd_init(struct hclge_dev *hdev); diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2066dd734444..d318d35e598f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -39,7 +39,6 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); -static int hclge_update_led_status(struct hclge_dev *hdev); static struct hnae3_ae_algo ae_algo; @@ -304,8 +303,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, - {"mac_tx_8192_12287_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)}, {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, {"mac_tx_9217_12287_oct_pkt_num", @@ -356,8 +353,6 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, - {"mac_rx_8192_12287_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)}, {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, {"mac_rx_9217_12287_oct_pkt_num", @@ -508,38 +503,6 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) return 0; } -static int hclge_mac_get_traffic_stats(struct hclge_dev *hdev) -{ - struct hclge_mac_stats *mac_stats = &hdev->hw_stats.mac_stats; - struct hclge_desc desc; - __le64 *desc_data; - int ret; - - /* for fiber port, need to query the total rx/tx packets statstics, - * used for data transferring checking. 
- */ - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - if (test_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) - return 0; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_STATS_MAC_TRAFFIC, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get MAC total pkt stats fail, ret = %d\n", ret); - - return ret; - } - - desc_data = (__le64 *)(&desc.data[0]); - mac_stats->mac_tx_total_pkt_num += le64_to_cpu(*desc_data++); - mac_stats->mac_rx_total_pkt_num += le64_to_cpu(*desc_data); - - return 0; -} - static int hclge_mac_update_stats(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 @@ -1459,8 +1422,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) /* We need to alloc a vport for main NIC of PF */ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; - if (hdev->num_tqps < num_vport) - num_vport = hdev->num_tqps; + if (hdev->num_tqps < num_vport) { + dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + hdev->num_tqps, num_vport); + return -EINVAL; + } /* Alloc the same number of TQPs for every vport */ tqp_per_vport = hdev->num_tqps / num_vport; @@ -1474,21 +1440,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) hdev->vport = vport; hdev->num_alloc_vport = num_vport; -#ifdef CONFIG_PCI_IOV - /* Enable SRIOV */ - if (hdev->num_req_vfs) { - dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", - hdev->num_req_vfs); - ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); - if (ret) { - hdev->num_alloc_vfs = 0; - dev_err(&pdev->dev, "SRIOV enable failed %d\n", - ret); - return ret; - } - } - hdev->num_alloc_vfs = hdev->num_req_vfs; -#endif + if (IS_ENABLED(CONFIG_PCI_IOV)) + hdev->num_alloc_vfs = hdev->num_req_vfs; for (i = 0; i < num_vport; i++) { vport->back = hdev; @@ -2335,8 +2288,10 @@ static int hclge_mac_init(struct hclge_dev *hdev) struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + struct hclge_vport *vport; int mtu; int ret; + int i; ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); if (ret) { @@ -2348,7 +2303,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; /* Initialize the MTA table work mode */ - hdev->accept_mta_mc = true; hdev->enable_mta = true; hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; @@ -2361,11 +2315,17 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } - ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); - if (ret) { - dev_err(&hdev->pdev->dev, - "set mta filter mode fail ret=%d\n", ret); - return ret; + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->accept_mta_mc = false; + + memset(vport->mta_shadow, 0, sizeof(vport->mta_shadow)); + ret = hclge_cfg_func_mta_filter(hdev, vport->vport_id, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", ret); + return ret; + } } ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); @@ -2597,6 +2557,15 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, } } +static void hclge_clear_all_event_cause(struct hclge_dev *hdev) +{ + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, + BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) | + BIT(HCLGE_VECTOR0_CORERESET_INT_B) | + BIT(HCLGE_VECTOR0_IMPRESET_INT_B)); + hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); +} + static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) { 
writel(enable ? 1 : 0, vector->addr); } @@ -2627,16 +2596,18 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) * mbx messages reported by this interrupt. */ hclge_mbx_task_schedule(hdev); - + break; default: - dev_dbg(&hdev->pdev->dev, - "received unknown or unhandled event of vector0\n"); + dev_warn(&hdev->pdev->dev, + "received unknown or unhandled event of vector0\n"); break; } - /* we should clear the source of interrupt */ - hclge_clear_event_cause(hdev, event_cause, clearval); - hclge_enable_vector(&hdev->misc_vector, true); + /* clear the source of interrupt if it is not caused by reset */ + if (event_cause != HCLGE_VECTOR0_EVENT_RST) { + hclge_clear_event_cause(hdev, event_cause, clearval); + hclge_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -2824,6 +2795,33 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, return rst_level; } +static void hclge_clear_reset_cause(struct hclge_dev *hdev) +{ + u32 clearval = 0; + + switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + break; + case HNAE3_GLOBAL_RESET: + clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + break; + case HNAE3_CORE_RESET: + clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + break; + default: + dev_warn(&hdev->pdev->dev, "Unsupported reset event to clear:%d", + hdev->reset_type); + break; + } + + if (!clearval) + return; + + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, clearval); + hclge_enable_vector(&hdev->misc_vector, true); +} + static void hclge_reset(struct hclge_dev *hdev) { /* perform reset of the stack & ae device for a client */ @@ -2836,6 +2834,8 @@ static void hclge_reset(struct hclge_dev *hdev) hclge_reset_ae_dev(hdev->ae_dev); hclge_notify_client(hdev, HNAE3_INIT_CLIENT); rtnl_unlock(); + + hclge_clear_reset_cause(hdev); } else { /* schedule again to check pending resets later */ set_bit(hdev->reset_type, &hdev->reset_pending); @@ -2930,38 +2930,16 @@ static void hclge_service_task(struct work_struct *work) struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); - /* The total rx/tx packets statstics are wanted to be updated - * per second. Both hclge_update_stats_for_all() and - * hclge_mac_get_traffic_stats() can do it. - */ if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { hclge_update_stats_for_all(hdev); hdev->hw_stats.stats_timer = 0; - } else { - hclge_mac_get_traffic_stats(hdev); } hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); - hclge_update_led_status(hdev); hclge_service_complete(hdev); } -static void hclge_disable_sriov(struct hclge_dev *hdev) -{ - /* If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(hdev->pdev)) { - dev_warn(&hdev->pdev->dev, - "disabling driver while VFs are assigned\n"); - return; - } - - pci_disable_sriov(hdev->pdev); -} - struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) { /* VF handle has no client */ @@ -3615,7 +3593,14 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, req = (struct hclge_promisc_cfg_cmd *)desc.data; req->vf_id = param->vf_id; - req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + /* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on + * pdev revision(0x20); newer revisions support them. Setting these two + * fields returns no error when the driver sends the command to firmware + * on revision(0x20). 
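
[Editor's note: in the rewritten req->flag just below, the enable bits prepared by hclge_promisc_param_init() are shifted by HCLGE_PROMISC_EN_B and the always-set TX/RX bits are OR-ed in. A standalone sketch of the bit arithmetic; HCLGE_PROMISC_EN_B, EN_UC, EN_ALL and the BIT(4)/BIT(5) values come from hclge_cmd.h in this diff, while the EN_MC/EN_BC values are assumed (0x2/0x4, consistent with EN_ALL == 0x7):

#include <stdint.h>
#include <stdio.h>

#define HCLGE_PROMISC_EN_B    1
#define HCLGE_PROMISC_EN_UC   0x1
#define HCLGE_PROMISC_EN_MC   0x2        /* assumed */
#define HCLGE_PROMISC_EN_BC   0x4        /* assumed */
#define HCLGE_PROMISC_TX_EN_B (1u << 4)  /* BIT(4) */
#define HCLGE_PROMISC_RX_EN_B (1u << 5)  /* BIT(5) */

int main(void)
{
	uint8_t enable = HCLGE_PROMISC_EN_UC | HCLGE_PROMISC_EN_BC;
	uint8_t flag = (uint8_t)((enable << HCLGE_PROMISC_EN_B) |
				 HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B);

	printf("req->flag = 0x%02x\n", flag); /* prints 0x3a */
	return 0;
}
]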
+ */ + req->flag = (param->enable << HCLGE_PROMISC_EN_B) | + HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3642,13 +3627,15 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } -static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, + vport->vport_id); hclge_cmd_set_promisc_mode(hdev, ¶m); } @@ -3683,48 +3670,50 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } -static int hclge_set_loopback(struct hnae3_handle *handle, - enum hnae3_loop loop_mode, bool en) +static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) { - struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_mac_mode_cmd *req; - struct hclge_dev *hdev = vport->back; struct hclge_desc desc; u32 loop_en; int ret; - switch (loop_mode) { - case HNAE3_MAC_INTER_LOOP_MAC: - req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; - /* 1 Read out the MAC mode config at first */ - hclge_cmd_setup_basic_desc(&desc, - HCLGE_OPC_CONFIG_MAC_MODE, - true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac loopback get fail, ret =%d.\n", - ret); - return ret; - } + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", ret); + return ret; + } - /* 2 Then setup the loopback flag */ - loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - if (en) - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1); - else - hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 
1 : 0); - req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); - /* 3 Config mac work mode with loopback flag - * and its original configure parameters - */ - hclge_cmd_reuse_desc(&desc, false); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "mac loopback set fail, ret =%d.\n", ret); + /* 3 Config mac work mode with loopback flag + * and its original configuration parameters + */ + hclge_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + switch (loop_mode) { + case HNAE3_MAC_INTER_LOOP_MAC: + ret = hclge_set_mac_loopback(hdev, en); break; default: ret = -ENOTSUPP; @@ -3783,13 +3772,11 @@ static int hclge_ae_start(struct hnae3_handle *handle) hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); mod_timer(&hdev->service_timer, jiffies + HZ); + hdev->hw.mac.link = 0; /* reset tqp stats */ hclge_reset_tqp_stats(handle); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) - return 0; - ret = hclge_mac_start_phy(hdev); if (ret) return ret; @@ -3805,9 +3792,12 @@ static void hclge_ae_stop(struct hnae3_handle *handle) del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + hclge_mac_stop_phy(hdev); return; + } for (i = 0; i < vport->alloc_tqps; i++) hclge_tqp_enable(hdev, i, 0, false); @@ -3819,6 +3809,8 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); hclge_update_link_status(hdev); } @@ -4029,9 +4021,88 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, return ret; } + if (enable) + set_bit(idx, vport->mta_shadow); + else + clear_bit(idx, vport->mta_shadow); + return 0; } +static int hclge_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; + struct hclge_vport *vport = hclge_get_vport(handle); + struct net_device *netdev = handle->kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; + + memset(mta_status, 0, sizeof(mta_status)); + + /* update mta_status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, ha->addr); + set_bit(tbl_idx, mta_status); + } + + return hclge_update_mta_status_common(vport, mta_status, + 0, HCLGE_MTA_TBL_SIZE, true); +} + +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter) +{ + struct hclge_dev *hdev = vport->back; + u16 update_max = idx + count; + u16 check_max; + int ret = 0; + bool used; + u16 i; + + /* setup mta check range */ + if (update_filter) { + i = 0; + check_max = HCLGE_MTA_TBL_SIZE; + } else { + i = idx; + check_max = update_max; + } + + used = false; + /* check and update all mta items */ + for (; i < check_max; i++) { + /* ignore unused item */ + if (!test_bit(i, vport->mta_shadow)) + continue; + + /* if i is in the update range, update it */ + if (i >= idx && i <
update_max) + if (!test_bit(i - idx, status)) + hclge_set_mta_table_item(vport, i, false); + + if (!used && test_bit(i, vport->mta_shadow)) + used = true; + } + + /* mta is no longer in use; disable it */ + if (vport->accept_mta_mc && update_filter && !used) { + ret = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + false); + if (ret) + dev_err(&hdev->pdev->dev, + "disable func mta filter fail ret=%d\n", + ret); + else + vport->accept_mta_mc = false; + } + + return ret; +} + static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd *req) { @@ -4299,9 +4370,25 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } - /* Set MTA table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, true); + /* If mc mac vlan table is full, use MTA table */ + if (status == -ENOSPC) { + if (!vport->accept_mta_mc) { + status = hclge_cfg_func_mta_filter(hdev, + vport->vport_id, + true); + if (status) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", + status); + return status; + } + vport->accept_mta_mc = true; + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + } return status; } @@ -4321,7 +4408,6 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; struct hclge_desc desc[3]; - u16 tbl_idx; /* mac addr check */ if (!is_multicast_ether_addr(addr)) { @@ -4350,17 +4436,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, status = hclge_add_mac_vlan_tbl(vport, &req, desc); } else { - /* This mac addr do not exist, can't delete it */ - dev_err(&hdev->pdev->dev, - "Rm multicast mac addr failed, ret = %d.\n", - status); - return -EIO; + /* This mac address may be in the mta table, but it cannot be + * deleted here because an mta entry represents an address + * range rather than a specific address. The delete action for + * all entries takes effect in update_mta_status, called by + * hns3_nic_set_rx_mode.
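+ * (Editorial illustration, not in the original patch: the mta index + * is computed from the first two bytes of the address, so e.g. every + * IPv6 multicast address 33:33:xx:xx:xx:xx hashes to the same entry, + * and clearing that entry for one address would also silence the + * others.)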
+ */ + status = 0; } - /* Set MTB table for this MAC address */ - tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); - status = hclge_set_mta_table_item(vport, tbl_idx, false); - return status; } @@ -4540,8 +4624,9 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); } -int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, - bool is_kill, u16 vlan, u8 qos, __be16 proto) +static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, + __be16 proto) { #define HCLGE_MAX_VF_BYTES 16 struct hclge_vlan_filter_vf_cfg_cmd *req0; @@ -4581,9 +4666,16 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, } if (!is_kill) { +#define HCLGE_VF_VLAN_NO_ENTRY 2 if (!req0->resp_code || req0->resp_code == 1) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) { + dev_warn(&hdev->pdev->dev, + "vf vlan table is full, vf vlan filter is disabled\n"); + return 0; + } + dev_err(&hdev->pdev->dev, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4599,12 +4691,9 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, return -EIO; } -static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, - __be16 proto, u16 vlan_id, - bool is_kill) +static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, + u16 vlan_id, bool is_kill) { - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; struct hclge_vlan_filter_pf_cfg_cmd *req; struct hclge_desc desc; u8 vlan_offset_byte_val; @@ -4624,22 +4713,66 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", ret); + return ret; +} + +static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, + u16 vport_id, u16 vlan_id, u8 qos, + bool is_kill) +{ + u16 vport_idx, vport_num = 0; + int ret; + + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, + 0, proto); if (ret) { dev_err(&hdev->pdev->dev, - "port vlan command, send fail, ret =%d.\n", - ret); + "Set %d vport vlan filter config fail, ret =%d.\n", + vport_id, ret); return ret; } - ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); - if (ret) { + /* vlan 0 may be added twice when 8021q module is enabled */ + if (!is_kill && !vlan_id && + test_bit(vport_id, hdev->vlan_table[vlan_id])) + return 0; + + if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { dev_err(&hdev->pdev->dev, - "Set pf vlan filter config fail, ret =%d.\n", - ret); - return -EIO; + "Add port vlan failed, vport %d is already in vlan %d\n", + vport_id, vlan_id); + return -EINVAL; } - return 0; + if (is_kill && + !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { + dev_err(&hdev->pdev->dev, + "Delete port vlan failed, vport %d is not in vlan %d\n", + vport_id, vlan_id); + return -EINVAL; + } + + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + vport_num++; + + if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) + ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, + is_kill); + + return ret; +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return 
hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, + 0, is_kill); } static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, @@ -4653,7 +4786,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, if (proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT; - return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); + return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); } static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) @@ -4669,10 +4802,14 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, - vcfg->accept_tag ? 1 : 0); - hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, - vcfg->accept_untag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B, + vcfg->accept_tag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B, + vcfg->accept_untag1 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B, + vcfg->accept_tag2 ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B, + vcfg->accept_untag2 ? 1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, vcfg->insert_tag1_en ? 1 : 0); hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, @@ -4796,8 +4933,18 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag = true; - vport->txvlan_cfg.accept_untag = true; + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision 0x20; newer revisions support them. Setting + * these two fields does not return an error when the driver + * sends the command to firmware on revision 0x20. + * These two fields cannot be configured by the user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag1_en = false; vport->txvlan_cfg.insert_tag2_en = false; vport->txvlan_cfg.default_tag1 = 0; @@ -4818,10 +4965,10 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) } handle = &hdev->vport[0].nic; - return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); + return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } -static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -5166,12 +5313,6 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, struct phy_device *phydev = hdev->hw.mac.phydev; u32 fc_autoneg; - /* Only support flow control negotiation for netdev with - * phy attached for now. - */ - if (!phydev) - return -EOPNOTSUPP; - fc_autoneg = hclge_get_autoneg(handle); if (auto_neg != fc_autoneg) { dev_info(&hdev->pdev->dev, @@ -5190,6 +5331,12 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, if (!fc_autoneg) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + /* Only support flow control negotiation for netdev with + * phy attached for now.
+ */ + if (!phydev) + return -EOPNOTSUPP; + return phy_start_aneg(phydev); } @@ -5282,7 +5429,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->nic.client = client; ret = client->ops->init_instance(&vport->nic); if (ret) - goto err; + return ret; if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { @@ -5290,11 +5437,11 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = hclge_init_roce_base_info(vport); if (ret) - goto err; + return ret; ret = rc->ops->init_instance(&vport->roce); if (ret) - goto err; + return ret; } break; @@ -5304,7 +5451,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, ret = client->ops->init_instance(&vport->nic); if (ret) - goto err; + return ret; break; case HNAE3_CLIENT_ROCE: @@ -5316,18 +5463,16 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) - goto err; + return ret; ret = client->ops->init_instance(&vport->roce); if (ret) - goto err; + return ret; } } } return 0; -err: - return ret; } static void hclge_uninit_client_instance(struct hnae3_client *client, @@ -5364,7 +5509,7 @@ static int hclge_pci_init(struct hclge_dev *hdev) ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); - goto err_no_drvdata; + return ret; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -5402,8 +5547,6 @@ err_clr_master: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); -err_no_drvdata: - pci_set_drvdata(pdev, NULL); return ret; } @@ -5412,6 +5555,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + pcim_iounmap(pdev, hdev->hw.io_base); pci_free_irq_vectors(pdev); pci_clear_master(pdev); pci_release_mem_regions(pdev); @@ -5427,7 +5571,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) { ret = -ENOMEM; - goto err_hclge_dev; + goto out; } hdev->pdev = pdev; @@ -5440,38 +5584,38 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI init failed\n"); - goto err_pci_init; + goto out; } /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) { dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); - return ret; + goto err_pci_uninit; } /* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) - goto err_cmd_init; + goto err_cmd_uninit; ret = hclge_get_cap(hdev); if (ret) { dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_configure(hdev); if (ret) { dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); - return ret; + goto err_cmd_uninit; } ret = hclge_misc_irq_init(hdev); @@ -5479,69 +5623,71 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_err(&pdev->dev, "Misc IRQ(vector0) init error, ret = %d.\n", ret); - return ret; + goto err_msi_uninit; } ret = hclge_alloc_tqps(hdev); if (ret) { dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); - return ret; + goto err_msi_irq_uninit; } ret = hclge_alloc_vport(hdev); if (ret) { dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); - return ret; + goto err_msi_irq_uninit; } ret = 
hclge_map_tqp(hdev); if (ret) { dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); - return ret; + goto err_msi_irq_uninit; } - ret = hclge_mac_mdio_config(hdev); - if (ret) { - dev_warn(&hdev->pdev->dev, - "mdio config fail ret=%d\n", ret); - return ret; + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + goto err_msi_irq_uninit; + } } ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); if (ret) { dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = hclge_tm_schd_init(hdev); if (ret) { dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } hclge_rss_init_cfg(hdev); ret = hclge_rss_init_hw(hdev); if (ret) { dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } ret = init_mgr_tbl(hdev); if (ret) { dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret); - return ret; + goto err_mdiobus_unreg; } hclge_dcb_ops_set(hdev); @@ -5551,6 +5697,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); + hclge_clear_all_event_cause(hdev); + /* Enable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, true); @@ -5564,11 +5712,21 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; -err_cmd_init: +err_mdiobus_unreg: + if (hdev->hw.mac.phydev) + mdiobus_unregister(hdev->hw.mac.mdio_bus); +err_msi_irq_uninit: + hclge_misc_irq_uninit(hdev); +err_msi_uninit: + pci_free_irq_vectors(pdev); +err_cmd_uninit: + hclge_destroy_cmd_queue(&hdev->hw); +err_pci_uninit: + pcim_iounmap(pdev, hdev->hw.io_base); + pci_clear_master(pdev); pci_release_regions(pdev); -err_pci_init: - pci_set_drvdata(pdev, NULL); -err_hclge_dev: + pci_disable_device(pdev); +out: return ret; } @@ -5586,6 +5744,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); hclge_stats_clear(hdev); + memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); ret = hclge_cmd_init(hdev); if (ret) { @@ -5642,9 +5801,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - /* Enable MISC vector(vector0) */ - hclge_enable_vector(&hdev->misc_vector, true); - dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -5658,9 +5814,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); - if (IS_ENABLED(CONFIG_PCI_IOV)) - hclge_disable_sriov(hdev); - if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) @@ -5675,6 +5828,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) /* Disable MISC vector(vector0) */ hclge_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); + hclge_destroy_cmd_queue(&hdev->hw); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); @@ -5985,9 +6140,7 @@ static void hclge_get_regs(struct hnae3_handle 
*handle, u32 *version, "Get 64 bit register failed, ret = %d.\n", ret); } -static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, - u8 act_led_status, u8 link_led_status, - u8 locate_led_status) +static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) { struct hclge_set_led_state_cmd *req; struct hclge_desc desc; @@ -5996,12 +6149,6 @@ static int hclge_set_led_status_sfp(struct hclge_dev *hdev, u8 speed_led_status, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false); req = (struct hclge_set_led_state_cmd *)desc.data; - hnae_set_field(req->port_speed_led_config, HCLGE_LED_PORT_SPEED_STATE_M, - HCLGE_LED_PORT_SPEED_STATE_S, speed_led_status); - hnae_set_field(req->link_led_config, HCLGE_LED_ACTIVITY_STATE_M, - HCLGE_LED_ACTIVITY_STATE_S, act_led_status); - hnae_set_field(req->activity_led_config, HCLGE_LED_LINK_STATE_M, - HCLGE_LED_LINK_STATE_S, link_led_status); hnae_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M, HCLGE_LED_LOCATE_STATE_S, locate_led_status); @@ -6022,105 +6169,17 @@ enum hclge_led_status { static int hclge_set_led_id(struct hnae3_handle *handle, enum ethtool_phys_id_state status) { -#define BLINK_FREQUENCY 2 struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - int ret = 0; - - if (phydev || hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return -EOPNOTSUPP; switch (status) { case ETHTOOL_ID_ACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_ON); - break; + return hclge_set_led_status(hdev, HCLGE_LED_ON); case ETHTOOL_ID_INACTIVE: - ret = hclge_set_led_status_sfp(hdev, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_NO_CHANGE, - HCLGE_LED_OFF); - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -enum hclge_led_port_speed { - HCLGE_SPEED_LED_FOR_1G, - HCLGE_SPEED_LED_FOR_10G, - HCLGE_SPEED_LED_FOR_25G, - HCLGE_SPEED_LED_FOR_40G, - HCLGE_SPEED_LED_FOR_50G, - HCLGE_SPEED_LED_FOR_100G, -}; - -static u8 hclge_led_get_speed_status(u32 speed) -{ - u8 speed_led; - - switch (speed) { - case HCLGE_MAC_SPEED_1G: - speed_led = HCLGE_SPEED_LED_FOR_1G; - break; - case HCLGE_MAC_SPEED_10G: - speed_led = HCLGE_SPEED_LED_FOR_10G; - break; - case HCLGE_MAC_SPEED_25G: - speed_led = HCLGE_SPEED_LED_FOR_25G; - break; - case HCLGE_MAC_SPEED_40G: - speed_led = HCLGE_SPEED_LED_FOR_40G; - break; - case HCLGE_MAC_SPEED_50G: - speed_led = HCLGE_SPEED_LED_FOR_50G; - break; - case HCLGE_MAC_SPEED_100G: - speed_led = HCLGE_SPEED_LED_FOR_100G; - break; + return hclge_set_led_status(hdev, HCLGE_LED_OFF); default: - speed_led = HCLGE_LED_NO_CHANGE; + return -EINVAL; } - - return speed_led; -} - -static int hclge_update_led_status(struct hclge_dev *hdev) -{ - u8 port_speed_status, link_status, activity_status; - u64 rx_pkts, tx_pkts; - - if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) - return 0; - - port_speed_status = hclge_led_get_speed_status(hdev->hw.mac.speed); - - rx_pkts = hdev->hw_stats.mac_stats.mac_rx_total_pkt_num; - tx_pkts = hdev->hw_stats.mac_stats.mac_tx_total_pkt_num; - if (rx_pkts != hdev->rx_pkts_for_led || - tx_pkts != hdev->tx_pkts_for_led) - activity_status = HCLGE_LED_ON; - else - activity_status = HCLGE_LED_OFF; - hdev->rx_pkts_for_led = rx_pkts; - hdev->tx_pkts_for_led = tx_pkts; - - if (hdev->hw.mac.link) - link_status = HCLGE_LED_ON; - else - link_status = HCLGE_LED_OFF; - - return 
hclge_set_led_status_sfp(hdev, port_speed_status, - activity_status, link_status, - HCLGE_LED_NO_CHANGE); } static void hclge_get_link_mode(struct hnae3_handle *handle, @@ -6190,6 +6249,7 @@ static const struct hnae3_ae_ops hclge_ops = { .rm_uc_addr = hclge_rm_uc_addr, .add_mc_addr = hclge_add_mc_addr, .rm_mc_addr = hclge_rm_mc_addr, + .update_mta_status = hclge_update_mta_status, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, @@ -6203,7 +6263,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fw_version = hclge_get_fw_version, .get_mdix_mode = hclge_get_mdix_mode, .enable_vlan_filter = hclge_enable_vlan_filter, - .set_vlan_filter = hclge_set_port_vlan_filter, + .set_vlan_filter = hclge_set_vlan_filter, .set_vf_vlan_filter = hclge_set_vf_vlan_filter, .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, @@ -6228,7 +6288,9 @@ static int hclge_init(void) { pr_info("%s is initializing\n", HCLGE_NAME); - return hnae3_register_ae_algo(&ae_algo); + hnae3_register_ae_algo(&ae_algo); + + return 0; } static void hclge_exit(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 0f4157e71282..7488534528cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -12,10 +12,12 @@ #include <linux/fs.h> #include <linux/types.h> #include <linux/phy.h> +#include <linux/if_vlan.h> + #include "hclge_cmd.h" #include "hnae3.h" -#define HCLGE_MOD_VERSION "v1.0" +#define HCLGE_MOD_VERSION "1.0" #define HCLGE_DRIVER_NAME "hclge" #define HCLGE_INVALID_VPORT 0xffff @@ -59,6 +61,8 @@ #define HCLGE_RSS_TC_SIZE_6 64 #define HCLGE_RSS_TC_SIZE_7 128 +#define HCLGE_MTA_TBL_SIZE 4096 + #define HCLGE_TQP_RESET_TRY_TIMES 10 #define HCLGE_PHY_PAGE_MDIX 0 @@ -406,9 +410,9 @@ struct hclge_mac_stats { u64 mac_tx_1519_2047_oct_pkt_num; u64 mac_tx_2048_4095_oct_pkt_num; u64 mac_tx_4096_8191_oct_pkt_num; - u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */ - u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ - u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */ + u64 rsv0; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; u64 mac_tx_12288_16383_oct_pkt_num; u64 mac_tx_1519_max_good_oct_pkt_num; u64 mac_tx_1519_max_bad_oct_pkt_num; @@ -433,9 +437,9 @@ struct hclge_mac_stats { u64 mac_rx_1519_2047_oct_pkt_num; u64 mac_rx_2048_4095_oct_pkt_num; u64 mac_rx_4096_8191_oct_pkt_num; - u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */ - u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ - u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 rsv1; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; u64 mac_rx_12288_16383_oct_pkt_num; u64 mac_rx_1519_max_good_oct_pkt_num; u64 mac_rx_1519_max_bad_oct_pkt_num; @@ -471,6 +475,7 @@ struct hclge_vlan_type_cfg { u16 tx_in_vlan_type; }; +#define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -556,18 +561,18 @@ struct hclge_dev { enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ - bool accept_mta_mc; /* Whether accept mta filter multicast */ struct hclge_vlan_type_cfg vlan_type_cfg; - u64 rx_pkts_for_led; - u64 tx_pkts_for_led; + unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)]; }; /* VPort level 
vlan tag configuration for TX direction */ struct hclge_tx_vtag_cfg { - bool accept_tag; /* Whether accept tagged packet from host */ - bool accept_untag; /* Whether accept untagged packet from host */ + bool accept_tag1; /* Whether accept tag1 packet from host */ + bool accept_untag1; /* Whether accept untag1 packet from host */ + bool accept_tag2; + bool accept_untag2; bool insert_tag1_en; /* Whether insert inner vlan tag */ bool insert_tag2_en; /* Whether insert outer vlan tag */ u16 default_tag1; /* The default inner vlan tag to insert */ @@ -616,6 +621,9 @@ struct hclge_vport { struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; + + bool accept_mta_mc; /* whether to accept mta filter multicast */ + unsigned long mta_shadow[BITS_TO_LONGS(HCLGE_MTA_TBL_SIZE)]; }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -633,6 +641,12 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable); +int hclge_update_mta_status_common(struct hclge_vport *vport, + unsigned long *status, + u16 idx, + u16 count, + bool update_filter); + struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); int hclge_bind_ring_with_vector(struct hclge_vport *vport, int vector_id, bool en, @@ -646,8 +660,9 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) } int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); -int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, - bool is_kill, u16 vlan, u8 qos, __be16 proto); +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); +int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable); int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a6f7ffa9c259..7541cb9b71ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -190,11 +190,12 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en = req->msg[1] ? true : false; + bool en_uc = req->msg[1] ? true : false; + bool en_mc = req->msg[2] ? true : false; struct hclge_promisc_param param; /* always enable broadcast promisc bit */ - hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_promisc_param_init(¶m, en_uc, en_mc, true, vport->vport_id); return hclge_cmd_set_promisc_mode(vport->back, ¶m); } @@ -230,12 +231,51 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return 0; } +static int hclge_set_vf_mc_mta_status(struct hclge_vport *vport, + u8 *msg, u8 idx, bool is_end) +{ +#define HCLGE_MTA_STATUS_MSG_SIZE 13 +#define HCLGE_MTA_STATUS_MSG_BITS \ + (HCLGE_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGE_MTA_STATUS_MSG_END_BITS \ + (HCLGE_MTA_TBL_SIZE % HCLGE_MTA_STATUS_MSG_BITS) + unsigned long status[BITS_TO_LONGS(HCLGE_MTA_STATUS_MSG_BITS)]; + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_ofs; + u8 msg_bit; + + tbl_cnt = is_end ? 
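+ /* editorial arithmetic, not in the original patch: each message + * carries 13 * 8 = 104 status bits, so the 4096-entry table takes + * DIV_ROUND_UP(4096, 104) = 40 messages and the final message + * carries 4096 % 104 = 40 bits + */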
HCLGE_MTA_STATUS_MSG_END_BITS : + HCLGE_MTA_STATUS_MSG_BITS; + + /* set msg field */ + msg_ofs = 0; + msg_bit = 0; + memset(status, 0, sizeof(status)); + for (tbl_idx = 0; tbl_idx < tbl_cnt; tbl_idx++) { + if (msg[msg_ofs] & BIT(msg_bit)) + set_bit(tbl_idx, status); + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + return hclge_update_mta_status_common(vport, + status, idx * HCLGE_MTA_STATUS_MSG_BITS, + tbl_cnt, is_end); +} + static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); struct hclge_dev *hdev = vport->back; + u8 resp_len = 0; + u8 resp_data; int status; if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { @@ -247,6 +287,22 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, bool enable = mbx_req->msg[2]; status = hclge_cfg_func_mta_filter(hdev, func_id, enable); + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ) { + resp_data = hdev->mta_mac_sel_type; + resp_len = sizeof(u8); + gen_resp = true; + status = 0; + } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE) { + /* mta status update msg format + * msg[2.6 : 2.0] msg index + * msg[2.7] msg is end + * msg[15 : 3] mta status bits[103 : 0] + */ + bool is_end = (mbx_req->msg[2] & 0x80) ? true : false; + + status = hclge_set_vf_mc_mta_status(vport, &mbx_req->msg[3], + mbx_req->msg[2] & 0x7F, + is_end); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -255,7 +311,8 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, } if (gen_resp) - hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); + hclge_gen_resp_to_vf(vport, mbx_req, status, + &resp_data, resp_len); return 0; } @@ -264,19 +321,23 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { - struct hclge_dev *hdev = vport->back; int status = 0; if (mbx_req->msg[1] == HCLGE_MBX_VLAN_FILTER) { + struct hnae3_handle *handle = &vport->nic; u16 vlan, proto; bool is_kill; is_kill = !!mbx_req->msg[2]; memcpy(&vlan, &mbx_req->msg[3], sizeof(vlan)); memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); - status = hclge_set_vf_vlan_common(hdev, vport->vport_id, - is_kill, vlan, 0, - cpu_to_be16(proto)); + status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), + vlan, is_kill); + } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { + struct hnae3_handle *handle = &vport->nic; + bool en = mbx_req->msg[2] ? 
true : false; + + status = hclge_en_hw_strip_rxvtag(handle, en); } if (gen_resp) @@ -378,6 +439,13 @@ static void hclge_reset_vf(struct hclge_vport *vport, hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); } +static bool hclge_cmd_crq_empty(struct hclge_hw *hw) +{ + u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); + + return tail == hw->cmq.crq.next_to_use; +} + void hclge_mbx_handler(struct hclge_dev *hdev) { struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq; @@ -386,12 +454,23 @@ void hclge_mbx_handler(struct hclge_dev *hdev) struct hclge_desc *desc; int ret, flag; - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); /* handle all the mailbox requests in the queue */ - while (hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B)) { + while (!hclge_cmd_crq_empty(&hdev->hw)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %d\n", + req->msg[0]); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + vport = &hdev->vport[req->mbx_src_vfid]; switch (req->msg[0]) { @@ -466,7 +545,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev) } crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 682c2d6618e7..9f7932e423b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -140,8 +140,11 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) - return 0; + if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", + hdev->hw.mac.phy_addr); + return -EINVAL; + } mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); if (!mdio_bus) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 885f25cd7be4..262c125f8137 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -134,11 +134,8 @@ static int hclge_pfc_stats_get(struct hclge_dev *hdev, } ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); - if (ret) { - dev_err(&hdev->pdev->dev, - "Get pfc pause stats fail, ret = %d.\n", ret); + if (ret) return ret; - } for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { struct hclge_pfc_stats_cmd *pfc_stats = @@ -503,7 +500,8 @@ static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, + u32 bit_map) { struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; struct hclge_desc desc; @@ -514,9 +512,8 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; bp_to_qs_map_cmd->tc_id = tc; - - /* Qset and tc is one by one mapping */ - bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc); + bp_to_qs_map_cmd->qs_group_id = 
grp_id; + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -1170,6 +1167,41 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) hdev->tm_info.hw_pfc_map); } +/* Each TC has 1024 queue sets for backpressure; they are divided into + * 32 groups, each containing 32 queue sets, so one group can be + * represented by a u32 bitmap. For example, qs_id 37 belongs to group + * 1 (bits 9:5) and maps to bit 5 (bits 4:0) of that group's bitmap. + */ +static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc) +{ + struct hclge_vport *vport = hdev->vport; + u32 i, k, qs_bitmap; + int ret; + + for (i = 0; i < HCLGE_BP_GRP_NUM; i++) { + qs_bitmap = 0; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + u16 qs_id = vport->qs_offset + tc; + u8 grp, sub_grp; + + grp = hnae_get_field(qs_id, HCLGE_BP_GRP_ID_M, + HCLGE_BP_GRP_ID_S); + sub_grp = hnae_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M, + HCLGE_BP_SUB_GRP_ID_S); + if (i == grp) + qs_bitmap |= (1 << sub_grp); + + vport++; + } + + ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap); + if (ret) + return ret; + } + + return 0; +} + static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) { bool tx_en, rx_en; @@ -1221,7 +1253,7 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_tm_qs_bp_cfg(hdev, i); + ret = hclge_bp_setup_hw(hdev, i); if (ret) return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 2dbe177581e9..c2b6e8a6700f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -89,6 +89,11 @@ struct hclge_pg_shapping_cmd { __le32 pg_shapping_para; }; +#define HCLGE_BP_GRP_NUM 32 +#define HCLGE_BP_SUB_GRP_ID_S 0 +#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) +#define HCLGE_BP_GRP_ID_S 5 +#define HCLGE_BP_GRP_ID_M GENMASK(9, 5) struct hclge_bp_to_qs_map_cmd { u8 tc_id; u8 rsvd[2]; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 2b8426412cc9..a17872aab168 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -654,7 +654,8 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) return 0; } -static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) +static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, + bool en_uc_pmc, bool en_mc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; @@ -664,7 +665,8 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en; + req->msg[1] = en_uc_pmc ? 1 : 0; + req->msg[2] = en_mc_pmc ?
1 : 0; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) @@ -674,11 +676,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, u32 en) return status; } -static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, u32 en) +static void hclgevf_set_promisc_mode(struct hnae3_handle *handle, + bool en_uc_pmc, bool en_mc_pmc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - hclgevf_cmd_set_promisc_mode(hdev, en); + hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, @@ -725,15 +728,124 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) } } -static int hclgevf_cfg_func_mta_filter(struct hnae3_handle *handle, bool en) +static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev) { + u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ, + NULL, 0, true, &resp_msg, sizeof(u8)); + + if (ret) { + dev_err(&hdev->pdev->dev, + "Read mta type fail, ret=%d.\n", ret); + return ret; + } + + if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) { + dev_err(&hdev->pdev->dev, + "Read mta type invalid, resp=%d.\n", resp_msg); + return -EINVAL; + } + + hdev->mta_mac_sel_type = resp_msg; + + return 0; +} + +static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev, + const u8 *addr) +{ + u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type; + u16 high_val = addr[1] | (addr[0] << 8); + + return (high_val >> rsh) & 0xfff; +} + +static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev, + unsigned long *status) +{ +#define HCLGEVF_MTA_STATUS_MSG_SIZE 13 +#define HCLGEVF_MTA_STATUS_MSG_BITS \ + (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE) +#define HCLGEVF_MTA_STATUS_MSG_END_BITS \ + (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS) + u16 tbl_cnt; + u16 tbl_idx; + u8 msg_cnt; + u8 msg_idx; + int ret; + + msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE, + HCLGEVF_MTA_STATUS_MSG_BITS); + tbl_idx = 0; + msg_idx = 0; + while (msg_cnt--) { + u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1]; + u8 *p = &msg[1]; + u8 msg_ofs; + u8 msg_bit; + + memset(msg, 0, sizeof(msg)); + + /* set index field */ + msg[0] = 0x7F & msg_idx; + + /* set end flag field */ + if (msg_cnt == 0) { + msg[0] |= 0x80; + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS; + } else { + tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS; + } + + /* set status field */ + msg_ofs = 0; + msg_bit = 0; + while (tbl_cnt--) { + if (test_bit(tbl_idx, status)) + p[msg_ofs] |= BIT(msg_bit); + + tbl_idx++; + + msg_bit++; + if (msg_bit == BITS_PER_BYTE) { + msg_bit = 0; + msg_ofs++; + } + } + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, + HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE, + msg, sizeof(msg), false, NULL, 0); + if (ret) + break; + + msg_idx++; + } + + return ret; +} + +static int hclgevf_update_mta_status(struct hnae3_handle *handle) +{ + unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)]; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - u8 msg[2] = {0}; + struct net_device *netdev = hdev->nic.kinfo.netdev; + struct netdev_hw_addr *ha; + u16 tbl_idx; - msg[0] = en; - return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST, - HCLGE_MBX_MAC_VLAN_MC_FUNC_MTA_ENABLE, - msg, 1, false, NULL, 0); + /* clear status */ + memset(mta_status, 0, sizeof(mta_status)); + + /* update status from mc addr list */ + netdev_for_each_mc_addr(ha, netdev) { + tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr); + set_bit(tbl_idx, 
mta_status); + } + + return hclgevf_do_update_mta_status(hdev, mta_status); } static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) @@ -830,6 +942,17 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); } +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data; + + msg_data = enable ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data, + 1, false, NULL, 0); +} + static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -1323,6 +1446,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) hclgevf_reset_tqp_stats(handle); del_timer_sync(&hdev->service_timer); cancel_work_sync(&hdev->service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); hclgevf_update_link_status(hdev, 0); } @@ -1441,6 +1565,8 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) return ret; } + hclgevf_clear_event_cause(hdev, 0); + /* enable misc. vector(vector 0) */ hclgevf_enable_vector(&hdev->misc_vector, true); @@ -1451,6 +1577,7 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) { /* disable misc vector(vector 0) */ hclgevf_enable_vector(&hdev->misc_vector, false); + synchronize_irq(hdev->misc_vector.vector_irq); free_irq(hdev->misc_vector.vector_irq, hdev); hclgevf_free_vector(hdev, 0); } @@ -1489,10 +1616,12 @@ static int hclgevf_init_instance(struct hclgevf_dev *hdev, return ret; break; case HNAE3_CLIENT_ROCE: - hdev->roce_client = client; - hdev->roce.client = client; + if (hnae3_dev_roce_supported(hdev)) { + hdev->roce_client = client; + hdev->roce.client = client; + } - if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { + if (hdev->roce_client && hdev->nic_client) { ret = hclgevf_init_roce_base_info(hdev); if (ret) return ret; @@ -1552,7 +1681,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); - goto err_no_drvdata; + return ret; } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); @@ -1584,8 +1713,7 @@ err_clr_master: pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); -err_no_drvdata: - pci_set_drvdata(pdev, NULL); + return ret; } @@ -1597,7 +1725,6 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) pci_clear_master(pdev); pci_release_regions(pdev); pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); } static int hclgevf_init_hdev(struct hclgevf_dev *hdev) @@ -1625,6 +1752,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) hclgevf_state_init(hdev); + ret = hclgevf_cmd_init(hdev); + if (ret) + goto err_cmd_init; + ret = hclgevf_misc_irq_init(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", @@ -1632,10 +1763,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_misc_irq_init; } - ret = hclgevf_cmd_init(hdev); - if (ret) - goto err_cmd_init; - ret = hclgevf_configure(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); @@ -1654,12 +1781,11 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - /* Initialize VF's MTA */ - hdev->accept_mta_mc = true; - ret = hclgevf_cfg_func_mta_filter(&hdev->nic, hdev->accept_mta_mc); + /* Initialize mta type for this VF */ + ret = hclgevf_cfg_func_mta_type(hdev); if (ret) { 
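/* Editorial note, not part of the original patch: without a valid MTA hash-selection type the VF cannot compute multicast table indexes, so initialization fails here instead of continuing. */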
dev_err(&hdev->pdev->dev, - "failed(%d) to set mta filter mode\n", ret); + "failed(%d) to initialize MTA type\n", ret); goto err_config; } @@ -1683,10 +1809,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) return 0; err_config: - hclgevf_cmd_uninit(hdev); -err_cmd_init: hclgevf_misc_irq_uninit(hdev); err_misc_irq_init: + hclgevf_cmd_uninit(hdev); +err_cmd_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); err_irq_init: @@ -1696,9 +1822,9 @@ err_irq_init: static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { - hclgevf_cmd_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); hclgevf_state_uninit(hdev); + hclgevf_misc_irq_uninit(hdev); + hclgevf_cmd_uninit(hdev); hclgevf_uninit_msi(hdev); hclgevf_pci_uninit(hdev); } @@ -1814,6 +1940,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .rm_uc_addr = hclgevf_rm_uc_addr, .add_mc_addr = hclgevf_add_mc_addr, .rm_mc_addr = hclgevf_rm_mc_addr, + .update_mta_status = hclgevf_update_mta_status, .get_stats = hclgevf_get_stats, .update_stats = hclgevf_update_stats, .get_strings = hclgevf_get_strings, @@ -1825,6 +1952,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_tc_size = hclgevf_get_tc_size, .get_fw_version = hclgevf_get_fw_version, .set_vlan_filter = hclgevf_set_vlan_filter, + .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, @@ -1842,7 +1970,9 @@ static int hclgevf_init(void) { pr_info("%s is initializing\n", HCLGEVF_NAME); - return hnae3_register_ae_algo(&ae_algovf); + hnae3_register_ae_algo(&ae_algovf); + + return 0; } static void hclgevf_exit(void) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a477a7c36bbd..0656e8e5c5f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -9,7 +9,7 @@ #include "hclgevf_cmd.h" #include "hnae3.h" -#define HCLGEVF_MOD_VERSION "v1.0" +#define HCLGEVF_MOD_VERSION "1.0" #define HCLGEVF_DRIVER_NAME "hclgevf" #define HCLGEVF_ROCEE_VECTOR_NUM 0 @@ -48,6 +48,9 @@ #define HCLGEVF_RSS_CFG_TBL_NUM \ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE) +#define HCLGEVF_MTA_TBL_SIZE 4096 +#define HCLGEVF_MTA_TYPE_SEL_MAX 4 + /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ @@ -152,6 +155,7 @@ struct hclgevf_dev { int *vector_irq; bool accept_mta_mc; /* whether to accept mta filter multicast */ + u8 mta_mac_sel_type; bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index a28618428338..b598c06af8e0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -126,6 +126,13 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, return status; } +static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) +{ + u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG); + + return tail == hw->cmq.crq.next_to_use; +} + void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { struct hclgevf_mbx_resp_status *resp; @@ -140,11 +147,22 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) resp = &hdev->mbx_resp; crq = &hdev->hw.cmq.crq; - flag = 
le16_to_cpu(crq->desc[crq->next_to_use].flag); - while (hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B)) { + while (!hclgevf_cmd_crq_empty(&hdev->hw)) { desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; + flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); + if (unlikely(!hnae_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { + dev_warn(&hdev->pdev->dev, + "dropped invalid mailbox message, code = %d\n", + req->msg[0]); + + /* dropping/not processing this invalid message */ + crq->desc[crq->next_to_use].flag = 0; + hclge_mbx_ring_ptr_move_crq(crq); + continue; + } + /* synchronous messages are time critical and need preferential * treatment. Therefore, we need to acknowledge all the sync * responses as quickly as possible so that waiting tasks do not @@ -205,7 +223,6 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) } crq->desc[crq->next_to_use].flag = 0; hclge_mbx_ring_ptr_move_crq(crq); - flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); } /* Write back CMDQ_RQ header pointer, M7 need this pointer */ diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig index 08db24954f7e..e4e8b24c1a5d 100644 --- a/drivers/net/ethernet/huawei/hinic/Kconfig +++ b/drivers/net/ethernet/huawei/hinic/Kconfig @@ -4,7 +4,7 @@ config HINIC tristate "Huawei Intelligent PCIE Network Interface Card" - depends on (PCI_MSI && X86) + depends on (PCI_MSI && (X86 || ARM64)) ---help--- This driver supports HiNIC PCIE Ethernet cards. To compile this driver as part of the kernel, choose Y here. diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index eb53bd93065e..5b122728dcb4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -51,7 +51,9 @@ static unsigned int rx_weight = 64; module_param(rx_weight, uint, 0644); MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); -#define PCI_DEVICE_ID_HI1822_PF 0x1822 +#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 +#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200 +#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201 #define HINIC_WQ_NAME "hinic_dev" @@ -1097,7 +1099,9 @@ static void hinic_remove(struct pci_dev *pdev) } static const struct pci_device_id hinic_pci_table[] = { - { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, { 0, 0} }; MODULE_DEVICE_TABLE(pci, hinic_pci_table); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ec1185808e5..d0e196bff081 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -109,13 +109,14 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *, struct ibmvnic_sub_crq_queue *); static int ibmvnic_poll(struct napi_struct *napi, int data); static void send_map_query(struct ibmvnic_adapter *adapter); -static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); -static void send_request_unmap(struct ibmvnic_adapter *, u8); +static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8); +static int send_request_unmap(struct ibmvnic_adapter *, u8); static int send_login(struct ibmvnic_adapter *adapter); static void send_cap_queries(struct ibmvnic_adapter *adapter); static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct 
ibmvnic_adapter *adapter); static int ibmvnic_init(struct ibmvnic_adapter *); +static int ibmvnic_reset_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); static int init_crq_queue(struct ibmvnic_adapter *adapter); @@ -172,6 +173,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; + int rc; ltb->size = size; ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, @@ -185,8 +187,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, adapter->map_id++; init_completion(&adapter->fw_done); - send_request_map(adapter, ltb->addr, - ltb->size, ltb->map_id); + rc = send_request_map(adapter, ltb->addr, + ltb->size, ltb->map_id); + if (rc) { + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + return rc; + } wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { @@ -215,10 +221,14 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, static int reset_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb) { + int rc; + memset(ltb->buff, 0, ltb->size); init_completion(&adapter->fw_done); - send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + if (rc) + return rc; wait_for_completion(&adapter->fw_done); if (adapter->fw_done_rc) { @@ -789,6 +799,7 @@ static void release_napi(struct ibmvnic_adapter *adapter) kfree(adapter->napi); adapter->napi = NULL; adapter->num_active_rx_napi = 0; + adapter->napi_enabled = false; } static int ibmvnic_login(struct net_device *netdev) @@ -924,6 +935,10 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) /* Partuial success, delay and re-send */ mdelay(1000); resend = true; + } else if (adapter->init_done_rc) { + netdev_warn(netdev, "Unable to set link state, rc=%d\n", + adapter->init_done_rc); + return adapter->init_done_rc; } } while (resend); @@ -956,6 +971,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; int len = 0; + int rc; if (adapter->vpd->buff) len = adapter->vpd->len; @@ -963,7 +979,9 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) init_completion(&adapter->fw_done); crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; crq.get_vpd_size.cmd = GET_VPD_SIZE; - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return rc; wait_for_completion(&adapter->fw_done); if (!adapter->vpd->len) @@ -996,7 +1014,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) crq.get_vpd.cmd = GET_VPD; crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) { + kfree(adapter->vpd->buff); + adapter->vpd->buff = NULL; + return rc; + } wait_for_completion(&adapter->fw_done); return 0; @@ -1695,6 +1718,7 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; union ibmvnic_crq crq; + int rc; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -1705,7 +1729,9 @@ static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data); 
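/* Editorial note, not part of the original patch: the completion is initialized before the CRQ request is posted so that the response handler's complete() cannot race against a stale completion left over from an earlier request. */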
init_completion(&adapter->fw_done); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return rc; wait_for_completion(&adapter->fw_done); /* netdev->dev_addr is changed in handle_change_mac_rsp function */ return adapter->fw_done_rc ? -EIO : 0; @@ -1787,7 +1813,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, return rc; } - rc = ibmvnic_init(adapter); + rc = ibmvnic_reset_init(adapter); if (rc) return IBMVNIC_INIT_FAILED; @@ -1857,6 +1883,85 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } +static int do_hard_reset(struct ibmvnic_adapter *adapter, + struct ibmvnic_rwi *rwi, u32 reset_state) +{ + struct net_device *netdev = adapter->netdev; + int rc; + + netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n", + rwi->reset_reason); + + netif_carrier_off(netdev); + adapter->reset_reason = rwi->reset_reason; + + ibmvnic_cleanup(netdev); + release_resources(adapter); + release_sub_crqs(adapter, 0); + release_crq_queue(adapter); + + /* remove the closed state so when we call open it appears + * we are coming from the probed state. + */ + adapter->state = VNIC_PROBED; + + rc = init_crq_queue(adapter); + if (rc) { + netdev_err(adapter->netdev, + "Couldn't initialize crq. rc=%d\n", rc); + return rc; + } + + rc = ibmvnic_init(adapter); + if (rc) + return rc; + + /* If the adapter was in PROBE state prior to the reset, + * exit here. + */ + if (reset_state == VNIC_PROBED) + return 0; + + rc = ibmvnic_login(netdev); + if (rc) { + adapter->state = VNIC_PROBED; + return 0; + } + /* netif_set_real_num_xx_queues needs to take rtnl lock here + * unless wait_for_reset is set, in which case the rtnl lock + * has already been taken before initializing the reset + */ + if (!adapter->wait_for_reset) { + rtnl_lock(); + rc = init_resources(adapter); + rtnl_unlock(); + } else { + rc = init_resources(adapter); + } + if (rc) + return rc; + + ibmvnic_disable_irqs(adapter); + adapter->state = VNIC_CLOSED; + + if (reset_state == VNIC_CLOSED) + return 0; + + rc = __ibmvnic_open(netdev); + if (rc) { + if (list_empty(&adapter->rwi_list)) + adapter->state = VNIC_CLOSED; + else + adapter->state = reset_state; + + return 0; + } + + netif_carrier_on(netdev); + + return 0; +} + static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter) { struct ibmvnic_rwi *rwi; @@ -1898,14 +2003,19 @@ static void __ibmvnic_reset(struct work_struct *work) netdev = adapter->netdev; mutex_lock(&adapter->reset_lock); - adapter->resetting = true; reset_state = adapter->state; rwi = get_next_rwi(adapter); while (rwi) { - rc = do_reset(adapter, rwi, reset_state); + if (adapter->force_reset_recovery) { + adapter->force_reset_recovery = false; + rc = do_hard_reset(adapter, rwi, reset_state); + } else { + rc = do_reset(adapter, rwi, reset_state); + } kfree(rwi); - if (rc && rc != IBMVNIC_INIT_FAILED) + if (rc && rc != IBMVNIC_INIT_FAILED && + !adapter->force_reset_recovery) break; rwi = get_next_rwi(adapter); @@ -1931,9 +2041,9 @@ static void __ibmvnic_reset(struct work_struct *work) static int ibmvnic_reset(struct ibmvnic_adapter *adapter, enum ibmvnic_reset_reason reason) { + struct list_head *entry, *tmp_entry; struct ibmvnic_rwi *rwi, *tmp; struct net_device *netdev = adapter->netdev; - struct list_head *entry; int ret; if (adapter->state == VNIC_REMOVING || @@ -1969,11 +2079,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, ret = ENOMEM; goto err; } - + /* if we just received a transport event, + * flush reset queue and process this reset + */ + if 
(adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) { + list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) + list_del(entry); + } rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); - + adapter->resetting = true; netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); @@ -2369,6 +2485,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, struct ibmvnic_adapter *adapter = netdev_priv(dev); union ibmvnic_crq crq; int i, j; + int rc; memset(&crq, 0, sizeof(crq)); crq.request_statistics.first = IBMVNIC_CRQ_CMD; @@ -2379,7 +2496,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, /* Wait for data to be written */ init_completion(&adapter->stats_done); - ibmvnic_send_crq(adapter, &crq); + rc = ibmvnic_send_crq(adapter, &crq); + if (rc) + return; wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) @@ -3154,6 +3273,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, (unsigned long int)cpu_to_be64(u64_crq[0]), (unsigned long int)cpu_to_be64(u64_crq[1])); + if (!adapter->crq.active && + crq->generic.first != IBMVNIC_CRQ_INIT_CMD) { + dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n"); + return -EINVAL; + } + /* Make sure the hypervisor sees the complete request */ mb(); @@ -3378,8 +3503,8 @@ buf_alloc_failed: return -1; } -static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, - u32 len, u8 map_id) +static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, + u32 len, u8 map_id) { union ibmvnic_crq crq; @@ -3389,10 +3514,10 @@ static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr, crq.request_map.map_id = map_id; crq.request_map.ioba = cpu_to_be32(addr); crq.request_map.len = cpu_to_be32(len); - ibmvnic_send_crq(adapter, &crq); + return ibmvnic_send_crq(adapter, &crq); } -static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) +static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) { union ibmvnic_crq crq; @@ -3400,7 +3525,7 @@ static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id) crq.request_unmap.first = IBMVNIC_CRQ_CMD; crq.request_unmap.cmd = REQUEST_UNMAP; crq.request_unmap.map_id = map_id; - ibmvnic_send_crq(adapter, &crq); + return ibmvnic_send_crq(adapter, &crq); } static void send_map_query(struct ibmvnic_adapter *adapter) @@ -4227,11 +4352,15 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, dev_info(dev, "Partner initialized\n"); adapter->from_passive_init = true; adapter->failover_pending = false; - complete(&adapter->init_done); + if (!completion_done(&adapter->init_done)) { + complete(&adapter->init_done); + adapter->init_done_rc = -EIO; + } ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); + adapter->crq.active = true; send_version_xchg(adapter); break; default: @@ -4240,6 +4369,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, return; case IBMVNIC_CRQ_XPORT_EVENT: netif_carrier_off(netdev); + adapter->crq.active = false; + if (adapter->resetting) + adapter->force_reset_recovery = true; if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) { dev_info(dev, "Migrated, re-enabling adapter\n"); ibmvnic_reset(adapter, VNIC_RESET_MOBILITY); @@ -4427,6 +4559,7 @@ static int ibmvnic_reset_crq(struct 
ibmvnic_adapter *adapter) /* Clean out the queue */ memset(crq->msgs, 0, PAGE_SIZE); crq->cur = 0; + crq->active = false; /* And re-open it again */ rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address, @@ -4461,6 +4594,7 @@ static void release_crq_queue(struct ibmvnic_adapter *adapter) DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); crq->msgs = NULL; + crq->active = false; } static int init_crq_queue(struct ibmvnic_adapter *adapter) @@ -4538,7 +4672,7 @@ map_failed: return retrc; } -static int ibmvnic_init(struct ibmvnic_adapter *adapter) +static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; unsigned long timeout = msecs_to_jiffies(30000); @@ -4597,6 +4731,49 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) return rc; } +static int ibmvnic_init(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + unsigned long timeout = msecs_to_jiffies(30000); + int rc; + + adapter->from_passive_init = false; + + init_completion(&adapter->init_done); + adapter->init_done_rc = 0; + ibmvnic_send_crq_init(adapter); + if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { + dev_err(dev, "Initialization sequence timed out\n"); + return -1; + } + + if (adapter->init_done_rc) { + release_crq_queue(adapter); + return adapter->init_done_rc; + } + + if (adapter->from_passive_init) { + adapter->state = VNIC_OPEN; + adapter->from_passive_init = false; + return -1; + } + + rc = init_sub_crqs(adapter); + if (rc) { + dev_err(dev, "Initialization of sub crqs failed\n"); + release_crq_queue(adapter); + return rc; + } + + rc = init_sub_crq_irqs(adapter); + if (rc) { + dev_err(dev, "Failed to initialize sub crq irqs\n"); + release_crq_queue(adapter); + } + + return rc; +} + static struct device_attribute dev_attr_failover; static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22391e8805f6..f9fb780102ac 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -865,6 +865,7 @@ struct ibmvnic_crq_queue { int size, cur; dma_addr_t msg_token; spinlock_t lock; + bool active; }; union sub_crq { @@ -1108,6 +1109,7 @@ struct ibmvnic_adapter { bool mac_change_pending; bool failover_pending; + bool force_reset_recovery; struct ibmvnic_tunables desired; struct ibmvnic_tunables fallback; diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 41ad56edfb96..27d5f27163d2 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/100 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* * e100.c: Intel(R) PRO/100 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile index c7caadd3c8af..314c52d44b7c 100644 --- a/drivers/net/ethernet/intel/e1000/Makefile +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/1000 Linux driver # Copyright(c) 1999 - 2006 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS <linux.nics@intel.com> -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ # # Makefile for the Intel(R) PRO/1000 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 3a0feea2df54..c40729b2c184 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -1,32 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3e80ca170dd7..5d365a986bb0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2006 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* ethtool support for e1000 */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 6e7e923d57bf..48428d6a00be 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* -* - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - - */ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ /* e1000_hw.c * Shared functions for accessing and configuring the MAC diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h index f09c569ec19b..b57a04954ccf 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.h +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* e1000_hw.h * Structures, enums, and macros for the MAC diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index d5eb19b86a0a..2110d5f2da19 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ #include "e1000.h" #include <net/ip6_checksum.h> diff --git a/drivers/net/ethernet/intel/e1000/e1000_osdep.h b/drivers/net/ethernet/intel/e1000/e1000_osdep.h index ae0559b8b011..e966bb290797 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_osdep.h +++ b/drivers/net/ethernet/intel/e1000/e1000_osdep.h @@ -1,32 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +/* Copyright(c) 1999 - 2006 Intel Corporation. */ /* glue for the OS independent part of e1000 * includes register access macros diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index 345f23927bcc..d3f29ffe1e47 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel PRO/1000 Linux driver - Copyright(c) 1999 - 2006 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2006 Intel Corporation. 
*/ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 953e99df420c..257bd59bc9c6 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 80003ES2LAN Gigabit Ethernet Controller (Copper) * 80003ES2LAN Gigabit Ethernet Controller (Serdes) diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h index ee6d1256fda4..aa9d639c6cbb 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_80003ES2LAN_H_ #define _E1000E_80003ES2LAN_H_ diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 924f2c8dfa6c..b9309302c29e 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82571EB Gigabit Ethernet Controller * 82571EB Gigabit Ethernet Controller (Copper) diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h index 9a24c645f726..834c238d02db 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.h +++ b/drivers/net/ethernet/intel/e1000e/82571.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_82571_H_ #define _E1000E_82571_H_ diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 24e391a4ac68..44e58b6e7660 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -1,30 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/1000 Linux driver -# Copyright(c) 1999 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see <http://www.gnu.org/licenses/>. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS <linux.nics@intel.com> -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 1999 - 2018 Intel Corporation. 
# # Makefile for the Intel(R) PRO/1000 ethernet driver diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 22883015a695..fd550dee4982 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index da88555ba1fd..c760dc72c520 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 64dc0c11147f..e084cb734eb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
- * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for e1000 */ diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 21802396bed6..eff75bd8a8f0 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 1551d6ce5341..cdae0efde8e6 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 3c4f82c21084..eb09c755fa17 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_ICH8LAN_H_ #define _E1000E_ICH8LAN_H_ diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b293464a9f27..4abd55d646c5 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h index cb0abf6c76a5..6ab261119801 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.h +++ b/drivers/net/ethernet/intel/e1000e/mac.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_MAC_H_ #define _E1000E_MAC_H_ diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c index e027660aeb92..c4c9b20bc51f 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.c +++ b/drivers/net/ethernet/intel/e1000e/manage.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h index 3268f2e58593..d868aad806d4 100644 --- a/drivers/net/ethernet/intel/e1000e/manage.h +++ b/drivers/net/ethernet/intel/e1000e/manage.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_MANAGE_H_ #define _E1000E_MANAGE_H_ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ec4a9759a6f2..acf1e8b52b8e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -3546,15 +3527,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) } break; case e1000_pch_spt: - if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { - /* Stable 24MHz frequency */ - incperiod = INCPERIOD_24MHZ; - incvalue = INCVALUE_24MHZ; - shift = INCVALUE_SHIFT_24MHZ; - adapter->cc.shift = shift; - break; - } - return -EINVAL; + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHZ; + incvalue = INCVALUE_24MHZ; + shift = INCVALUE_SHIFT_24MHZ; + adapter->cc.shift = shift; + break; case e1000_pch_cnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 68949bb41b7b..937f9af22d26 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h index 8e082028be7d..6a30dfea4117 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.h +++ b/drivers/net/ethernet/intel/e1000e/nvm.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_NVM_H_ #define _E1000E_NVM_H_ diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 2def33eba9e6..098369fd3e65 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/netdevice.h> #include <linux/module.h> diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b8226ed0e338..42233019255a 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "e1000.h" diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index d4180b5e9196..c48777d09523 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef _E1000E_PHY_H_ #define _E1000E_PHY_H_ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index f941e5085f44..37c76945ad9b 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -1,24 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* PTP 1588 Hardware Clock (PHC) * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 16afc3c2a986..47f5ca793970 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -1,24 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel PRO/1000 Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000E_REGS_H_ #define _E1000E_REGS_H_ diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index 93277cb99cb7..26a9746ccb14 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,26 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel(R) Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2016 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. 
-# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # # Makefile for the Intel(R) Ethernet Switch Host Interface Driver diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a9cdf763c59d..a903a0ba45e1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_H_ #define _FM10K_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index e303d88720ef..f51a63fca513 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_common.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h index 2bdb24d2ca9d..4c48fb73b3e7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_COMMON_H_ #define _FM10K_COMMON_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index c4f733452ef2..20768ac7f17e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 43e8d839831f..dca104121c05 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "fm10k.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 28b6b4e56487..7657daa27298 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,40 +1,32 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/vmalloc.h> #include "fm10k.h" struct fm10k_stats { + /* The stat_string is expected to be a format string formatted using + * vsnprintf by fm10k_add_stat_strings. Every member of a stats array + * should use the same format specifiers as they will be formatted + * using the same variadic arguments. + */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; -#define FM10K_NETDEV_STAT(_net_stat) { \ - .stat_string = #_net_stat, \ - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ - .stat_offset = offsetof(struct net_device_stats, _net_stat) \ +#define FM10K_STAT_FIELDS(_type, _name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ } +/* netdevice statistics */ +#define FM10K_NETDEV_STAT(_net_stat) \ + FM10K_STAT_FIELDS(struct net_device_stats, __stringify(_net_stat), \ + _net_stat) + static const struct fm10k_stats fm10k_gstrings_net_stats[] = { FM10K_NETDEV_STAT(tx_packets), FM10K_NETDEV_STAT(tx_bytes), @@ -52,11 +44,9 @@ static const struct fm10k_stats fm10k_gstrings_net_stats[] = { #define FM10K_NETDEV_STATS_LEN ARRAY_SIZE(fm10k_gstrings_net_stats) -#define FM10K_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_intfc, _stat), \ - .stat_offset = offsetof(struct fm10k_intfc, _stat) \ -} +/* General interface statistics */ +#define FM10K_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_intfc, _name, _stat) static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("tx_restart_queue", restart_queue), @@ -93,11 +83,9 @@ static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { FM10K_STAT("nodesc_drop", stats.nodesc_drop.count), }; -#define FM10K_MBX_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_mbx_info, _stat), \ - .stat_offset = offsetof(struct fm10k_mbx_info, _stat) \ -} +/* mailbox statistics */ +#define FM10K_MBX_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_mbx_info, _name, _stat) static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_tx_busy", tx_busy), @@ -111,15 +99,13 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; 
-#define FM10K_QUEUE_STAT(_name, _stat) { \ - .stat_string = _name, \ - .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \ - .stat_offset = offsetof(struct fm10k_ring, _stat) \ -} +/* per-queue ring statistics */ +#define FM10K_QUEUE_STAT(_name, _stat) \ + FM10K_STAT_FIELDS(struct fm10k_ring, _name, _stat) static const struct fm10k_stats fm10k_gstrings_queue_stats[] = { - FM10K_QUEUE_STAT("packets", stats.packets), - FM10K_QUEUE_STAT("bytes", stats.bytes), + FM10K_QUEUE_STAT("%s_queue_%u_packets", stats.packets), + FM10K_QUEUE_STAT("%s_queue_%u_bytes", stats.bytes), }; #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) @@ -149,49 +135,44 @@ enum { static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { }; -static void fm10k_add_stat_strings(u8 **p, const char *prefix, - const struct fm10k_stats stats[], - const unsigned int size) +static void __fm10k_add_stat_strings(u8 **p, const struct fm10k_stats stats[], + const unsigned int size, ...) { unsigned int i; for (i = 0; i < size; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s%s", - prefix, stats[i].stat_string); + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); *p += ETH_GSTRING_LEN; + va_end(args); } } +#define fm10k_add_stat_strings(p, stats, ...) \ + __fm10k_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); unsigned int i; - fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats, - FM10K_NETDEV_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_net_stats); - fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats, - FM10K_GLOBAL_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_global_stats); - fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_mbx_stats); if (interface->hw.mac.type != fm10k_mac_vf) - fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats, - FM10K_PF_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_pf_stats); for (i = 0; i < interface->hw.mac.max_queues; i++) { - char prefix[ETH_GSTRING_LEN]; - - snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); - fm10k_add_stat_strings(&data, prefix, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, + "tx", i); - snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); - fm10k_add_stat_strings(&data, prefix, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_add_stat_strings(&data, fm10k_gstrings_queue_stats, + "rx", i); } } @@ -236,9 +217,9 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) } } -static void fm10k_add_ethtool_stats(u64 **data, void *pointer, - const struct fm10k_stats stats[], - const unsigned int size) +static void __fm10k_add_ethtool_stats(u64 **data, void *pointer, + const struct fm10k_stats stats[], + const unsigned int size) { unsigned int i; char *p; @@ -267,11 +248,16 @@ static void fm10k_add_ethtool_stats(u64 **data, void *pointer, *((*data)++) = *(u8 *)p; break; default: + WARN_ONCE(1, "unexpected stat size for %s", + stats[i].stat_string); *((*data)++) = 0; } } } +#define fm10k_add_ethtool_stats(data, pointer, stats) \ + __fm10k_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + static void fm10k_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats __always_unused *stats, u64 *data) @@ -282,20 
+268,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, fm10k_update_stats(interface); - fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats, - FM10K_NETDEV_STATS_LEN); + fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats); - fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats, - FM10K_GLOBAL_STATS_LEN); + fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats); fm10k_add_ethtool_stats(&data, &interface->hw.mbx, - fm10k_gstrings_mbx_stats, - FM10K_MBX_STATS_LEN); + fm10k_gstrings_mbx_stats); if (interface->hw.mac.type != fm10k_mac_vf) { fm10k_add_ethtool_stats(&data, interface, - fm10k_gstrings_pf_stats, - FM10K_PF_STATS_LEN); + fm10k_gstrings_pf_stats); } for (i = 0; i < interface->hw.mac.max_queues; i++) { @@ -303,13 +285,11 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev, ring = interface->tx_ring[i]; fm10k_add_ethtool_stats(&data, ring, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_gstrings_queue_stats); ring = interface->rx_ring[i]; fm10k_add_ethtool_stats(&data, ring, - fm10k_gstrings_queue_stats, - FM10K_QUEUE_STATS_LEN); + fm10k_gstrings_queue_stats); } } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 30395f5e5e87..e707d717012f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index df8607097e4a..3f536541f45f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/types.h> #include <linux/module.h> @@ -445,15 +427,14 @@ static void fm10k_type_trans(struct fm10k_ring *rx_ring, l2_accel = NULL; } - skb->protocol = eth_type_trans(skb, dev); - /* Record Rx queue, or update macvlan statistics */ if (!l2_accel) skb_record_rx_queue(skb, rx_ring->queue_index); else macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, - (skb->pkt_type == PACKET_BROADCAST) || - (skb->pkt_type == PACKET_MULTICAST)); + false); + + skb->protocol = eth_type_trans(skb, dev); } /** diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index c01bf30a0c9e..21021fe4f1c3 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_common.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 007e1dfa9b7a..56d1abff04e2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_MBX_H_ #define _FM10K_MBX_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 45793491d4ba..929f538d28bc 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,27 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k.h" #include <linux/vmalloc.h> #include <net/udp_tunnel.h> +#include <linux/if_macvlan.h> /** * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) @@ -924,7 +907,9 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) { struct fm10k_intfc *interface = netdev_priv(netdev); + struct fm10k_l2_accel *l2_accel = interface->l2_accel; struct fm10k_hw *hw = &interface->hw; + u16 glort; s32 err; int i; @@ -992,6 +977,22 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) if (err) goto err_out; + /* Update L2 accelerated macvlan addresses */ + if (l2_accel) { + for (i = 0; i < l2_accel->size; i++) { + struct net_device *sdev = l2_accel->macvlan[i]; + + if (!sdev) + continue; + + glort = l2_accel->dglort + 1 + i; + + fm10k_queue_mac_request(interface, glort, + sdev->dev_addr, + vid, set); + } + } + /* set VLAN ID prior to syncing/unsyncing the VLAN */ interface->vid = vid + (set ? VLAN_N_VID : 0); @@ -1231,6 +1232,22 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) fm10k_queue_mac_request(interface, glort, hw->mac.addr, vid, true); + + /* synchronize macvlan addresses */ + if (l2_accel) { + for (i = 0; i < l2_accel->size; i++) { + struct net_device *sdev = l2_accel->macvlan[i]; + + if (!sdev) + continue; + + glort = l2_accel->dglort + 1 + i; + + fm10k_queue_mac_request(interface, glort, + sdev->dev_addr, + vid, true); + } + } } /* update xcast mode before synchronizing addresses if host's mailbox @@ -1254,7 +1271,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) glort = l2_accel->dglort + 1 + i; hw->mac.ops.update_xcast_mode(hw, glort, - FM10K_XCAST_MODE_MULTI); + FM10K_XCAST_MODE_NONE); fm10k_queue_mac_request(interface, glort, sdev->dev_addr, hw->mac.default_vid, true); @@ -1447,7 +1464,14 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; int size = 0, i; - u16 glort; + u16 vid, glort; + + /* The hardware supported by fm10k only filters on the destination MAC + * address. In order to avoid issues we only support offloading modes + * where the hardware can actually provide the functionality. 
+ */ + if (!macvlan_supports_dest_filter(sdev)) + return ERR_PTR(-EMEDIUMTYPE); /* allocate l2 accel structure if it is not available */ if (!l2_accel) { @@ -1513,12 +1537,18 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, glort = l2_accel->dglort + 1 + i; - if (fm10k_host_mbx_ready(interface)) { + if (fm10k_host_mbx_ready(interface)) hw->mac.ops.update_xcast_mode(hw, glort, - FM10K_XCAST_MODE_MULTI); + FM10K_XCAST_MODE_NONE); + + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + hw->mac.default_vid, true); + + for (vid = fm10k_find_next_vlan(interface, 0); + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - hw->mac.default_vid, true); - } + vid, true); fm10k_mbx_unlock(interface); @@ -1532,8 +1562,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) struct fm10k_dglort_cfg dglort = { 0 }; struct fm10k_hw *hw = &interface->hw; struct net_device *sdev = priv; + u16 vid, glort; int i; - u16 glort; if (!l2_accel) return; @@ -1553,12 +1583,18 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) glort = l2_accel->dglort + 1 + i; - if (fm10k_host_mbx_ready(interface)) { + if (fm10k_host_mbx_ready(interface)) hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); + + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + hw->mac.default_vid, false); + + for (vid = fm10k_find_next_vlan(interface, 0); + vid < VLAN_N_VID; + vid = fm10k_find_next_vlan(interface, vid)) fm10k_queue_mac_request(interface, glort, sdev->dev_addr, - hw->mac.default_vid, false); - } + vid, false); fm10k_mbx_unlock(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index c4a2b688b38b..15071e4adb98 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/module.h> #include <linux/interrupt.h> diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 7ba54c534f8c..8f0a99b6a537 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_pf.h" #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index ae81f9a16602..8e814df709d2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_PF_H_ #define _FM10K_PF_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index 725ecb7abccd..2a7a40bf2b1c 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2018 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "fm10k_tlv.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index 5d2ee759507e..160bc5b78f99 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_TLV_H_ #define _FM10K_TLV_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index dd23af11e2c1..3e608e493f9d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_TYPE_H_ #define _FM10K_TYPE_H_ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index f06913630b39..a8519c1f0406 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,23 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "fm10k_vf.h" diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index 66a66b73a2f1..787d0d570a28 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,23 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _FM10K_VF_H_ #define _FM10K_VF_H_ diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 75437768a07c..14397e7e9925 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -1,29 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel Ethernet Controller XL710 Family Linux Driver -# Copyright(c) 2013 - 2015 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see <http://www.gnu.org/licenses/>. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # # Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index a44139c1de80..7a80652e2500 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_H_ #define _I40E_H_ @@ -334,10 +310,12 @@ struct i40e_tc_configuration { struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS]; }; +#define I40E_UDP_PORT_INDEX_UNUSED 255 struct i40e_udp_port_config { /* AdminQ command interface expects port number in Host byte order */ u16 port; u8 type; + u8 filter_index; }; /* macros related to FLX_PIT */ @@ -608,7 +586,7 @@ struct i40e_pf { unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ - u64 ptp_base_adj; + u32 ptp_adj_mult; u32 tx_hwtstamp_timeouts; u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; @@ -1009,6 +987,9 @@ void i40e_service_event_schedule(struct i40e_pf *pf); void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len); +int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, bool is_xdp, + bool enable); +int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable); int i40e_vsi_start_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 843fc7781ef8..ddbea79d18e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e_status.h" #include "i40e_type.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 0a8749ee9fd3..edec3df78971 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 0244923edeb8..7d888e05f96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h index abed0c52e782..cb8689222c8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index d8ce4999864f..5f3b8b9ff511 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include <linux/list.h> #include <linux/errno.h> @@ -64,7 +40,7 @@ static struct i40e_ops i40e_lan_ops = { /** * i40e_client_get_params - Get the params that can change at runtime * @vsi: the VSI with the message - * @param: clinet param struct + * @params: client param struct * **/ static @@ -590,7 +566,7 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev, * i40e_client_setup_qvlist * @ldev: pointer to L2 context. * @client: Client pointer. - * @qv_info: queue and vector list + * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ @@ -665,7 +641,7 @@ err: * i40e_client_request_reset * @ldev: pointer to L2 context. * @client: Client pointer. - * @level: reset level + * @reset_level: reset level **/ static void i40e_client_request_reset(struct i40e_info *ldev, struct i40e_client *client, diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index 9d464d40bc17..72994baf4941 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_CLIENT_H_ #define _I40E_CLIENT_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index c0a3dae8a2db..eb2d1530d331 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_type.h" #include "i40e_adminq.h" @@ -1695,6 +1671,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, /** * i40e_set_fc * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. **/ @@ -2831,8 +2809,8 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only @@ -2892,8 +2870,8 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only **/ @@ -2923,8 +2901,8 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ @@ -3672,6 +3650,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, /** * i40e_aq_start_lldp * @hw: pointer to the hw struct + * @buff: buffer for result + * @buff_size: buffer size * @cmd_details: pointer to command details structure or NULL * * Start the embedded LLDP Agent on all ports. 
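(Editorial aside on the i40e_common.c hunks above and below: most of this churn is kernel-doc hygiene. Each @tag in a function's doc comment must name an actual parameter, and every parameter must be documented, or scripts/kernel-doc emits warnings; hence @rule_used becomes @rules_used, @phy_adr becomes @phy_addr, missing entries such as @aq_failures are added, and stale ones such as @header_len are dropped. A hedged sketch of the expected shape follows, using an invented function rather than a real i40e helper, with the file's own "**/" closing style.)

	/**
	 * example_read_phy_register - read a PHY register over MDIO (illustrative)
	 * @page: registers page number
	 * @reg: register address in the page
	 * @phy_addr: PHY address on the MDIO interface
	 * @value: buffer for the value read
	 *
	 * Each @tag above must match a parameter below; a stale name such as
	 * the old @phy_adr is exactly what this patch corrects.
	 *
	 * Return: 0 on success or a negative error code.
	 **/
	static int example_read_phy_register(unsigned int page, unsigned int reg,
					     unsigned char phy_addr,
					     unsigned short *value)
	{
		(void)page;
		(void)reg;
		(void)phy_addr;
		*value = 0;	/* body elided; the comment shape is the point */
		return 0;
	}

(The diff resumes below.)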
@@ -3752,7 +3732,6 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order - * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL @@ -3971,6 +3950,7 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: seid of the switching component connected to Physical Port * @ets_data: Buffer holding ETS parameters + * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL **/ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, @@ -4314,10 +4294,10 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ -#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { +#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; @@ -4448,6 +4428,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read + * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. * @@ -4574,7 +4555,7 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -4619,7 +4600,7 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value @@ -4660,7 +4641,7 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -4734,7 +4715,7 @@ phy_read_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -4801,7 +4782,7 @@ phy_write_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -4837,7 +4818,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads 
specified PHY register value @@ -4872,7 +4853,6 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want - * @phy_addr: Returned PHY address * * Gets PHY address for current port **/ @@ -5082,7 +5062,9 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, * i40e_led_set_phy * @hw: pointer to the HW structure * @on: true or false + * @led_addr: address of led register to use * @mode: original val plus bit for set or ignore + * * Set led's on or off when controlled by the PHY * **/ @@ -5371,6 +5353,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes + * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ enum diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 9fec728dc4b9..56bff8faf371 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_adminq.h" #include "i40e_prototype.h" @@ -945,6 +921,70 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw) } /** + * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * @module: address of the module pointer + * @word_offset: offset of LLDP configuration + * + * Reads the LLDP configuration data from NVM using passed addresses + **/ +static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) +{ + u32 address, offset = (2 * word_offset); + i40e_status ret; + __le16 raw_mem; + u16 mem; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + return ret; + + ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret) + return ret; + + mem = le16_to_cpu(raw_mem); + /* Check if this pointer needs to be read in word size or 4K sector + * units. 
+ */ + if (mem & I40E_PTR_TYPE) + address = (0x7FFF & mem) * 4096; + else + address = (0x7FFF & mem) * 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, module, offset, sizeof(raw_mem), &raw_mem, + true, NULL); + i40e_release_nvm(hw); + if (ret) + return ret; + + mem = le16_to_cpu(raw_mem); + offset = mem + word_offset; + offset *= 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, 0, address + offset, + sizeof(struct i40e_lldp_variables), lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} + +/** * i40e_read_lldp_cfg - read LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables @@ -955,21 +995,34 @@ i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg) { i40e_status ret = 0; - u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + u32 mem; if (!lldp_cfg) return I40E_ERR_PARAM; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret) - goto err_lldp_cfg; + return ret; - ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, - sizeof(struct i40e_lldp_variables), - (u8 *)lldp_cfg, - true, NULL); + ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), + &mem, true, NULL); i40e_release_nvm(hw); + if (ret) + return ret; + + /* Read a bit that holds information whether we are running flat or + * structured NVM image. Flat image has LLDP configuration in shadow + * ram, so there is a need to pass different addresses for both cases. + */ + if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { + /* Flat NVM case */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, + I40E_SR_LLDP_CFG_PTR); + } else { + /* Good old structured NVM image */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, + I40E_NVM_LLDP_CFG_PTR); + } -err_lldp_cfg: return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 4f806386cb22..2b748a60a843 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_DCB_H_ #define _I40E_DCB_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 502818e3da78..9deae9a35423 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifdef CONFIG_I40E_DCB #include "i40e.h" @@ -47,7 +23,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) /** * i40e_dcbnl_ieee_getets - retrieve local IEEE ETS configuration - * @netdev: the corresponding netdev + * @dev: the corresponding netdev * @ets: structure to hold the ETS information * * Returns local IEEE ETS configuration @@ -86,8 +62,8 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev, /** * i40e_dcbnl_ieee_getpfc - retrieve local IEEE PFC configuration - * @netdev: the corresponding netdev - * @ets: structure to hold the PFC information + * @dev: the corresponding netdev + * @pfc: structure to hold the PFC information * * Returns local IEEE PFC configuration **/ @@ -119,7 +95,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev, /** * i40e_dcbnl_getdcbx - retrieve current DCBx capability - * @netdev: the corresponding netdev + * @dev: the corresponding netdev * * Returns DCBx capability features **/ @@ -132,7 +108,8 @@ static u8 i40e_dcbnl_getdcbx(struct net_device *dev) /** * i40e_dcbnl_get_perm_hw_addr - MAC address used by DCBx - * @netdev: the corresponding netdev + * @dev: the corresponding netdev + * @perm_addr: buffer to store the MAC address * * Returns the SAN MAC address used for LLDP exchange **/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index d494dcaf18d0..56b911a5dd8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifdef CONFIG_DEBUG_FS @@ -36,8 +12,8 @@ static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid - * @pf - the PF structure to search for the vsi - * @seid - seid of the vsi it is searching for + * @pf: the PF structure to search for the vsi + * @seid: seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) { @@ -55,8 +31,8 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) /** * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf - the PF structure to search for the veb - * @seid - seid of the veb it is searching for + * @pf: the PF structure to search for the veb + * @seid: seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index ad6a66ccb576..334b05ff685a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index df3e60470f8b..ef4d3762bf37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_diag.h" #include "i40e_prototype.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index be8341763475..c3340f320a18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_DIAG_H_ #define _I40E_DIAG_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index b974482ff630..6947a2a571cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for i40e */ @@ -43,13 +19,13 @@ struct i40e_stats { } #define I40E_NETDEV_STAT(_net_stat) \ - I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) #define I40E_PF_STAT(_name, _stat) \ - I40E_STAT(struct i40e_pf, _name, _stat) + I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ - I40E_STAT(struct i40e_vsi, _name, _stat) + I40E_STAT(struct i40e_vsi, _name, _stat) #define I40E_VEB_STAT(_name, _stat) \ - I40E_STAT(struct i40e_veb, _name, _stat) + I40E_STAT(struct i40e_veb, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -66,18 +42,18 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = { }; static const struct i40e_stats i40e_gstrings_veb_stats[] = { - I40E_VEB_STAT("rx_bytes", stats.rx_bytes), - I40E_VEB_STAT("tx_bytes", stats.tx_bytes), - I40E_VEB_STAT("rx_unicast", stats.rx_unicast), - I40E_VEB_STAT("tx_unicast", stats.tx_unicast), - I40E_VEB_STAT("rx_multicast", stats.rx_multicast), - I40E_VEB_STAT("tx_multicast", stats.tx_multicast), - I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast), - I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast), - I40E_VEB_STAT("rx_discards", stats.rx_discards), - I40E_VEB_STAT("tx_discards", stats.tx_discards), - I40E_VEB_STAT("tx_errors", stats.tx_errors), - I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol), + I40E_VEB_STAT("veb.rx_bytes", stats.rx_bytes), + I40E_VEB_STAT("veb.tx_bytes", stats.tx_bytes), + I40E_VEB_STAT("veb.rx_unicast", stats.rx_unicast), + I40E_VEB_STAT("veb.tx_unicast", stats.tx_unicast), + I40E_VEB_STAT("veb.rx_multicast", stats.rx_multicast), + I40E_VEB_STAT("veb.tx_multicast", stats.tx_multicast), + I40E_VEB_STAT("veb.rx_broadcast", stats.rx_broadcast), + I40E_VEB_STAT("veb.tx_broadcast", stats.tx_broadcast), + I40E_VEB_STAT("veb.rx_discards", stats.rx_discards), + I40E_VEB_STAT("veb.tx_discards", 
stats.tx_discards), + I40E_VEB_STAT("veb.tx_errors", stats.tx_errors), + I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol), }; static const struct i40e_stats i40e_gstrings_misc_stats[] = { @@ -90,6 +66,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("tx_busy", tx_busy), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; @@ -105,76 +82,77 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { * is queried on the base PF netdev, not on the VMDq or FCoE netdev. */ static const struct i40e_stats i40e_gstrings_stats[] = { - I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes), - I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes), - I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast), - I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast), - I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast), - I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast), - I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), - I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), - I40E_PF_STAT("tx_errors", stats.eth.tx_errors), - I40E_PF_STAT("rx_dropped", stats.eth.rx_discards), - I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), - I40E_PF_STAT("rx_crc_errors", stats.crc_errors), - I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), - I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), - I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), - I40E_PF_STAT("tx_timeout", tx_timeout_count), - I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error), - I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), - I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), - I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), - I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), - I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), - I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx), - I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx), - I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx), - I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx), - I40E_PF_STAT("rx_size_64", stats.rx_size_64), - I40E_PF_STAT("rx_size_127", stats.rx_size_127), - I40E_PF_STAT("rx_size_255", stats.rx_size_255), - I40E_PF_STAT("rx_size_511", stats.rx_size_511), - I40E_PF_STAT("rx_size_1023", stats.rx_size_1023), - I40E_PF_STAT("rx_size_1522", stats.rx_size_1522), - I40E_PF_STAT("rx_size_big", stats.rx_size_big), - I40E_PF_STAT("tx_size_64", stats.tx_size_64), - I40E_PF_STAT("tx_size_127", stats.tx_size_127), - I40E_PF_STAT("tx_size_255", stats.tx_size_255), - I40E_PF_STAT("tx_size_511", stats.tx_size_511), - I40E_PF_STAT("tx_size_1023", stats.tx_size_1023), - I40E_PF_STAT("tx_size_1522", stats.tx_size_1522), - I40E_PF_STAT("tx_size_big", stats.tx_size_big), - I40E_PF_STAT("rx_undersize", stats.rx_undersize), - I40E_PF_STAT("rx_fragments", stats.rx_fragments), - I40E_PF_STAT("rx_oversize", stats.rx_oversize), - I40E_PF_STAT("rx_jabber", stats.rx_jabber), - I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), - I40E_PF_STAT("arq_overflows", arq_overflows), - I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), - I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), - I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), - I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), - I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), - I40E_PF_STAT("fdir_atr_status", 
stats.fd_atr_status), - I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match), - I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status), + I40E_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), + I40E_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), + I40E_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), + I40E_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), + I40E_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), + I40E_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), + I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), + I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), + I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors), + I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards), + I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), + I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors), + I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes), + I40E_PF_STAT("port.mac_local_faults", stats.mac_local_faults), + I40E_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), + I40E_PF_STAT("port.tx_timeout", tx_timeout_count), + I40E_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), + I40E_PF_STAT("port.rx_length_errors", stats.rx_length_errors), + I40E_PF_STAT("port.link_xon_rx", stats.link_xon_rx), + I40E_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), + I40E_PF_STAT("port.link_xon_tx", stats.link_xon_tx), + I40E_PF_STAT("port.link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("port.rx_size_64", stats.rx_size_64), + I40E_PF_STAT("port.rx_size_127", stats.rx_size_127), + I40E_PF_STAT("port.rx_size_255", stats.rx_size_255), + I40E_PF_STAT("port.rx_size_511", stats.rx_size_511), + I40E_PF_STAT("port.rx_size_1023", stats.rx_size_1023), + I40E_PF_STAT("port.rx_size_1522", stats.rx_size_1522), + I40E_PF_STAT("port.rx_size_big", stats.rx_size_big), + I40E_PF_STAT("port.tx_size_64", stats.tx_size_64), + I40E_PF_STAT("port.tx_size_127", stats.tx_size_127), + I40E_PF_STAT("port.tx_size_255", stats.tx_size_255), + I40E_PF_STAT("port.tx_size_511", stats.tx_size_511), + I40E_PF_STAT("port.tx_size_1023", stats.tx_size_1023), + I40E_PF_STAT("port.tx_size_1522", stats.tx_size_1522), + I40E_PF_STAT("port.tx_size_big", stats.tx_size_big), + I40E_PF_STAT("port.rx_undersize", stats.rx_undersize), + I40E_PF_STAT("port.rx_fragments", stats.rx_fragments), + I40E_PF_STAT("port.rx_oversize", stats.rx_oversize), + I40E_PF_STAT("port.rx_jabber", stats.rx_jabber), + I40E_PF_STAT("port.VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("port.arq_overflows", arq_overflows), + I40E_PF_STAT("port.tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + I40E_PF_STAT("port.rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("port.tx_hwtstamp_skipped", tx_hwtstamp_skipped), + I40E_PF_STAT("port.fdir_flush_cnt", fd_flush_cnt), + I40E_PF_STAT("port.fdir_atr_match", stats.fd_atr_match), + I40E_PF_STAT("port.fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), + I40E_PF_STAT("port.fdir_atr_status", stats.fd_atr_status), + I40E_PF_STAT("port.fdir_sb_match", stats.fd_sb_match), + I40E_PF_STAT("port.fdir_sb_status", stats.fd_sb_status), /* LPI stats */ - I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status), - I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status), - I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count), - I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count), + I40E_PF_STAT("port.tx_lpi_status", stats.tx_lpi_status), + I40E_PF_STAT("port.rx_lpi_status", stats.rx_lpi_status), + I40E_PF_STAT("port.tx_lpi_count", stats.tx_lpi_count), + I40E_PF_STAT("port.rx_lpi_count", stats.rx_lpi_count), }; 
-#define I40E_QUEUE_STATS_LEN(n) \ - (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \ +/* We use num_tx_queues here as a proxy for the maximum number of queues + * available because we always allocate queues symmetrically. + */ +#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues) +#define I40E_QUEUE_STATS_LEN(n) \ + (I40E_MAX_NUM_QUEUES(n) \ * 2 /* Tx and Rx together */ \ * (sizeof(struct i40e_queue_stats) / sizeof(u64))) #define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) -#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) +#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ +#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ I40E_MISC_STATS_LEN + \ I40E_QUEUE_STATS_LEN((n))) #define I40E_PFC_STATS_LEN ( \ @@ -977,7 +955,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_test_link_mode(ks, advertising, 10000baseCR_Full) || ethtool_link_ksettings_test_link_mode(ks, advertising, - 10000baseSR_Full)) + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) config.link_speed |= I40E_LINK_SPEED_10GB; if (ethtool_link_ksettings_test_link_mode(ks, advertising, 20000baseKR2_Full)) @@ -1079,6 +1059,9 @@ static int i40e_nway_reset(struct net_device *netdev) /** * i40e_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * * Return tx/rx-pause status **/ static void i40e_get_pauseparam(struct net_device *netdev, @@ -1677,6 +1660,32 @@ done: return err; } +/** + * i40e_get_stats_count - return the stats count for a device + * @netdev: the netdev to return the count for + * + * Returns the total number of statistics for this netdev. Note that even + * though this is a function, it is required that the count for a specific + * netdev must never change. Basing the count on static values such as the + * maximum number of queues or the device type is ok. However, the API for + * obtaining stats is *not* safe against changes based on non-static + * values such as the *current* number of queues, or runtime flags. + * + * If a statistic is not always enabled, return it as part of the count + * anyways, always return its string, and report its value as zero. + **/ +static int i40e_get_stats_count(struct net_device *netdev) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + + if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) + return I40E_PF_STATS_LEN(netdev) + I40E_VEB_STATS_TOTAL; + else + return I40E_VSI_STATS_LEN(netdev); +} + static int i40e_get_sset_count(struct net_device *netdev, int sset) { struct i40e_netdev_priv *np = netdev_priv(netdev); @@ -1687,16 +1696,7 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_TEST: return I40E_TEST_LEN; case ETH_SS_STATS: - if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { - int len = I40E_PF_STATS_LEN(netdev); - - if ((pf->lan_veb != I40E_NO_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) - len += I40E_VEB_STATS_TOTAL; - return len; - } else { - return I40E_VSI_STATS_LEN(netdev); - } + return i40e_get_stats_count(netdev); case ETH_SS_PRIV_FLAGS: return I40E_PRIV_FLAGS_STR_LEN + (pf->hw.pf_id == 0 ? 
I40E_GL_PRIV_FLAGS_STR_LEN : 0); @@ -1705,6 +1705,20 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } } +/** + * i40e_get_ethtool_stats - copy stat values into supplied buffer + * @netdev: the netdev to collect stats for + * @stats: ethtool stats command structure + * @data: ethtool supplied buffer + * + * Copy the stats values for this netdev into the buffer. Expects data to be + * pre-allocated to the size returned by i40e_get_stats_count.. Note that all + * statistics must be copied in a static order, and the count must not change + * for a given netdev. See i40e_get_stats_count for more details. + * + * If a statistic is not currently valid (such as a disabled queue), this + * function reports its value as zero. + **/ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -1712,47 +1726,54 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - unsigned int j; - int i = 0; + unsigned int i; char *p; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; i40e_update_stats(vsi); - for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) { - p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset; - data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat == + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + p = (char *)net_stats + i40e_gstrings_net_stats[i].stat_offset; + *(data++) = (i40e_gstrings_net_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MISC_STATS_LEN; j++) { - p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset; - data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat == + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + p = (char *)vsi + i40e_gstrings_misc_stats[i].stat_offset; + *(data++) = (i40e_gstrings_misc_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } rcu_read_lock(); - for (j = 0; j < vsi->num_queue_pairs; j++) { - tx_ring = READ_ONCE(vsi->tx_rings[j]); + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { + tx_ring = READ_ONCE(vsi->tx_rings[i]); - if (!tx_ring) + if (!tx_ring) { + /* Bump the stat counter to skip these stats, and make + * sure the memory is zero'd + */ + *(data++) = 0; + *(data++) = 0; + *(data++) = 0; + *(data++) = 0; continue; + } /* process Tx ring statistics */ do { start = u64_stats_fetch_begin_irq(&tx_ring->syncp); - data[i] = tx_ring->stats.packets; - data[i + 1] = tx_ring->stats.bytes; + data[0] = tx_ring->stats.packets; + data[1] = tx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - i += 2; + data += 2; /* Rx ring is the 2nd half of the queue pair */ rx_ring = &tx_ring[1]; do { start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - data[i] = rx_ring->stats.packets; - data[i + 1] = rx_ring->stats.bytes; + data[0] = rx_ring->stats.packets; + data[1] = rx_ring->stats.bytes; } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); - i += 2; + data += 2; } rcu_read_unlock(); if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) @@ -1762,38 +1783,131 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { struct i40e_veb *veb = pf->veb[pf->lan_veb]; - for (j = 0; j < I40E_VEB_STATS_LEN; j++) { + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { p = (char *)veb; - p += i40e_gstrings_veb_stats[j].stat_offset; - data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat == + p += i40e_gstrings_veb_stats[i].stat_offset; + *(data++) = (i40e_gstrings_veb_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MAX_TRAFFIC_CLASS; j++) { - data[i++] = veb->tc_stats.tc_tx_packets[j]; - data[i++] = veb->tc_stats.tc_tx_bytes[j]; - data[i++] = veb->tc_stats.tc_rx_packets[j]; - data[i++] = veb->tc_stats.tc_rx_bytes[j]; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + *(data++) = veb->tc_stats.tc_tx_packets[i]; + *(data++) = veb->tc_stats.tc_tx_bytes[i]; + *(data++) = veb->tc_stats.tc_rx_packets[i]; + *(data++) = veb->tc_stats.tc_rx_bytes[i]; } + } else { + data += I40E_VEB_STATS_TOTAL; } - for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) { - p = (char *)pf + i40e_gstrings_stats[j].stat_offset; - data[i++] = (i40e_gstrings_stats[j].sizeof_stat == + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + p = (char *)pf + i40e_gstrings_stats[i].stat_offset; + *(data++) = (i40e_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - data[i++] = pf->stats.priority_xon_tx[j]; - data[i++] = pf->stats.priority_xoff_tx[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + *(data++) = pf->stats.priority_xon_tx[i]; + *(data++) = pf->stats.priority_xoff_tx[i]; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) { - data[i++] = pf->stats.priority_xon_rx[j]; - data[i++] = pf->stats.priority_xoff_rx[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + *(data++) = pf->stats.priority_xon_rx[i]; + *(data++) = pf->stats.priority_xoff_rx[i]; } - for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) - data[i++] = pf->stats.priority_xon_2_xoff[j]; + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + *(data++) = pf->stats.priority_xon_2_xoff[i]; } -static void i40e_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) +/** + * i40e_get_stat_strings - copy stat strings into supplied buffer + * @netdev: the netdev to collect strings for + * @data: supplied buffer to copy strings into + * + * Copy the strings related to stats for this netdev. Expects data to be + * pre-allocated with the size reported by i40e_get_stats_count. Note that the + * strings must be copied in a static order and the total count must not + * change for a given netdev. See i40e_get_stats_count for more details. + **/ +static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + unsigned int i; + u8 *p = data; + + for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + i40e_gstrings_net_stats[i].stat_string); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MISC_STATS_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + i40e_gstrings_misc_stats[i].stat_string); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { + snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); + data += ETH_GSTRING_LEN; + } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + return; + + for (i = 0; i < I40E_VEB_STATS_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + i40e_gstrings_veb_stats[i].stat_string); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + snprintf(data, ETH_GSTRING_LEN, + "veb.tc_%u_tx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "veb.tc_%u_tx_bytes", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "veb.tc_%u_rx_packets", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "veb.tc_%u_rx_bytes", i); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + i40e_gstrings_stats[i].stat_string); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(data, ETH_GSTRING_LEN, + "port.tx_priority_%u_xon", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "port.tx_priority_%u_xoff", i); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(data, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon", i); + data += ETH_GSTRING_LEN; + snprintf(data, ETH_GSTRING_LEN, + "port.rx_priority_%u_xoff", i); + data += 
ETH_GSTRING_LEN; + } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + snprintf(data, ETH_GSTRING_LEN, + "port.rx_priority_%u_xon_2_xoff", i); + data += ETH_GSTRING_LEN; + } + + WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, + "stat strings count mismatch!"); +} + +static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1801,98 +1915,33 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, char *p = (char *)data; unsigned int i; + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } + if (pf->hw.pf_id != 0) + return; + for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + i40e_gl_gstrings_priv_flags[i].flag_string); + p += ETH_GSTRING_LEN; + } +} + +static void i40e_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ switch (stringset) { case ETH_SS_TEST: memcpy(data, i40e_gstrings_test, I40E_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_net_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MISC_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gstrings_misc_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < vsi->num_queue_pairs; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%d.rx_bytes", i); - p += ETH_GSTRING_LEN; - } - if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) - return; - - if ((pf->lan_veb != I40E_NO_VEB) && - (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { - for (i = 0; i < I40E_VEB_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "veb.%s", - i40e_gstrings_veb_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_tx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_tx_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_rx_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "veb.tc_%d_rx_bytes", i); - p += ETH_GSTRING_LEN; - } - } - for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "port.%s", - i40e_gstrings_stats[i].stat_string); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%d_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "port.tx_priority_%d_xoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xon", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xoff", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - snprintf(p, ETH_GSTRING_LEN, - "port.rx_priority_%d_xon_2_xoff", i); - p += ETH_GSTRING_LEN; - } - /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ + i40e_get_stat_strings(netdev, data); break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - 
i40e_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } - if (pf->hw.pf_id != 0) - break; - for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40e_gl_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + i40e_get_priv_flag_strings(netdev, data); break; default: break; @@ -2550,7 +2599,7 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) /** * i40e_check_mask - Check whether a mask field is set * @mask: the full mask value - * @field; mask of the field to check + * @field: mask of the field to check * * If the given mask is fully set, return positive value. If the mask for the * field is fully unset, return zero. Otherwise return a negative error code. @@ -2621,6 +2670,7 @@ static int i40e_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, /** * i40e_fill_rx_flow_user_data - Fill in user-defined data field * @fsp: pointer to rx_flow specification + * @data: pointer to return userdef data * * Reads the userdef data structure and properly fills in the user defined * fields of the rx_flow_spec. @@ -2799,6 +2849,7 @@ no_input_set: * i40e_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule data * * Returns Success if the command is supported. **/ @@ -2840,7 +2891,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register * @nfc: pointer to user request - * @i_setc bits currently set + * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ @@ -2885,7 +2936,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct - * @cmd: ethtool rxnfc command + * @nfc: ethtool rxnfc command * * Returns Success if the flow input set is supported. **/ @@ -3284,7 +3335,7 @@ static int i40e_add_flex_offset(struct list_head *flex_pit_list, * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table * @pf: Pointer to the PF structure * @flex_pit_list: list of flexible src offsets in use - * #flex_pit_start: index to first entry for this section of the table + * @flex_pit_start: index to first entry for this section of the table * * In order to handle flexible data, the hardware uses a table of values * called the FLX_PIT table. This table is used to indicate which sections of @@ -3398,7 +3449,7 @@ static void i40e_reprogram_flex_pit(struct i40e_pf *pf) /** * i40e_flow_str - Converts a flow_type into a human readable string - * @flow_type: the flow type from a flow specification + * @fsp: the flow specification * * Currently only flow types we support are included here, and the string * value attempts to match what ethtool would use to configure this flow type. @@ -4103,7 +4154,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi) /** * i40e_get_channels - Get the current channels enabled and max supported etc. - * @netdev: network interface device structure + * @dev: network interface device structure * @ch: ethtool channels structure * * We don't support separate tx and rx queues as channels. The other count @@ -4112,7 +4163,7 @@ static unsigned int i40e_max_channels(struct i40e_vsi *vsi) * q_vectors since we support a lot more queue pairs than q_vectors. 
**/ static void i40e_get_channels(struct net_device *dev, - struct ethtool_channels *ch) + struct ethtool_channels *ch) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -4131,14 +4182,14 @@ static void i40e_get_channels(struct net_device *dev, /** * i40e_set_channels - Set the new channels count. - * @netdev: network interface device structure + * @dev: network interface device structure * @ch: ethtool channels structure * * The new channels count may not be the same as requested by the user * since it gets rounded down to a power of 2 value. **/ static int i40e_set_channels(struct net_device *dev, - struct ethtool_channels *ch) + struct ethtool_channels *ch) { const u8 drop = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET; struct i40e_netdev_priv *np = netdev_priv(dev); @@ -4273,6 +4324,7 @@ out: * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 6d4b590f851b..19ce93d7fd0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_osdep.h" #include "i40e_register.h" @@ -198,7 +174,6 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index - * @is_pf: distinguishes a VF from a PF * * This function: * 1. Marks the entry in pd tabe (for paged address mode) or in sd table diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 7b5fd33d70ae..1c78de838857 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c index cd40dc487b38..994011c38fb4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_osdep.h" #include "i40e_register.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h index 79e1396735d9..c46a2c449e60 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 16229998fb1e..c944bd10b03d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/etherdevice.h> #include <linux/of_net.h> @@ -278,8 +254,8 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) /** * i40e_find_vsi_from_id - searches for the vsi with the given id - * @pf - the pf structure to search for the vsi - * @id - id of the vsi it is searching for + * @pf: the pf structure to search for the vsi + * @id: id of the vsi it is searching for **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { @@ -435,6 +411,7 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring, /** * i40e_get_netdev_stats_struct - Get statistics for netdev interface * @netdev: network interface device structure + * @stats: data structure to store statistics * * Returns the address of the device statistics structure. * The statistics are actually updated from the service task. @@ -2027,7 +2004,7 @@ struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next) * from firmware * @count: Number of filters added * @add_list: return data from fw - * @head: pointer to first filter in current batch + * @add_head: pointer to first filter in current batch * * MAC filter entries from list were slated to be added to device. Returns * number of successful filters. Note that 0 does NOT mean success! 
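Most of the small hunks in this file and its neighbours are kernel-doc repairs: the "@param - description" dash form becomes "@param: description" (the colon form is what scripts/kernel-doc actually parses), stale parameters such as @header_len and @phy_addr-as-output are dropped, and undocumented ones such as @proto, @stats and @vsi_name gain entries. A hypothetical, self-contained example of the target form, using demo_ names that are not from the driver:

/* Illustrative only: kernel-doc wants every parameter in the
 * signature documented with the "@name:" colon syntax.
 */
struct demo_pf { int id; };

/**
 * demo_find_vsi_from_id - search for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 *
 * Return: 0 if @pf carries @id, -1 otherwise.
 */
static int demo_find_vsi_from_id(const struct demo_pf *pf, int id)
{
	return (pf && pf->id == id) ? 0 : -1;
}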
@@ -2134,6 +2111,7 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name, /** * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags * @vsi: pointer to the VSI + * @vsi_name: the VSI name * @f: filter data * * This function sets or clears the promiscuous broadcast flags for VLAN @@ -2840,6 +2818,7 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid) /** * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload * @netdev: network interface to be adjusted + * @proto: unused protocol value * @vid: vlan id to be added * * net_device_ops implementation for adding vlan ids @@ -2862,8 +2841,26 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, } /** + * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path + * @netdev: network interface to be adjusted + * @proto: unused protocol value + * @vid: vlan id to be added + **/ +static void i40e_vlan_rx_add_vid_up(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + + if (vid >= VLAN_N_VID) + return; + set_bit(vid, vsi->active_vlans); +} + +/** * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload * @netdev: network interface to be adjusted + * @proto: unused protocol value * @vid: vlan id to be removed * * net_device_ops implementation for removing vlan ids @@ -2902,8 +2899,8 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi) i40e_vlan_stripping_disable(vsi); for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) - i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), - vid); + i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), + vid); } /** @@ -3485,7 +3482,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) /** * i40e_enable_misc_int_causes - enable the non-queue interrupts - * @hw: ptr to the hardware info + * @pf: pointer to private device data structure **/ static void i40e_enable_misc_int_causes(struct i40e_pf *pf) { @@ -4255,8 +4252,8 @@ static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable) * @is_xdp: true if the queue is used for XDP * @enable: start or stop the queue **/ -static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, - bool is_xdp, bool enable) +int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q, + bool is_xdp, bool enable) { int ret; @@ -4301,7 +4298,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable) if (ret) break; } - return ret; } @@ -4340,9 +4336,9 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) * @pf_q: the PF queue to configure * @enable: start or stop the queue * - * This function enables or disables a single queue. Note that any delay - * required after the operation is expected to be handled by the caller of - * this function. + * This function enables or disables a single queue. Note that + * any delay required after the operation is expected to be + * handled by the caller of this function. **/ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) { @@ -4372,6 +4368,30 @@ static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable) } /** + * i40e_control_wait_rx_q + * @pf: the PF structure + * @pf_q: queue being configured + * @enable: start or stop the rings + * + * This function enables or disables a single queue along with waiting + * for the change to finish. The caller of this function should handle + * the delays needed in the case of disabling queues. 
+ **/ +int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable) +{ + int ret = 0; + + i40e_control_rx_q(pf, pf_q, enable); + + /* wait for the change to finish */ + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) + return ret; + + return ret; +} + +/** * i40e_vsi_control_rx - Start or stop a VSI's rings * @vsi: the VSI being configured * @enable: start or stop the rings @@ -4383,10 +4403,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) pf_q = vsi->base_queue; for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { - i40e_control_rx_q(pf, pf_q, enable); - - /* wait for the change to finish */ - ret = i40e_pf_rxq_wait(pf, pf_q, enable); + ret = i40e_control_wait_rx_q(pf, pf_q, enable); if (ret) { dev_info(&pf->pdev->dev, "VSI seid %d Rx ring %d %sable timeout\n", @@ -5096,7 +5113,7 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi) * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC * @vsi: the VSI being configured * @enabled_tc: TC bitmap - * @bw_credits: BW shared credits per TC + * @bw_share: BW shared credits per TC * * Returns 0 on success, negative value on failure **/ @@ -6353,6 +6370,7 @@ out: /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message + * @isup: true of link is up, false otherwise */ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { @@ -7212,8 +7230,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, if (mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { - mask->dst = be32_to_cpu(mask->dst); - dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n", + dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", &mask->dst); return I40E_ERR_CONFIG; } @@ -7223,8 +7240,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, if (mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { - mask->src = be32_to_cpu(mask->src); - dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n", + dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", &mask->src); return I40E_ERR_CONFIG; } @@ -9691,9 +9707,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } -static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +static const char *i40e_tunnel_name(u8 type) { - switch (port->type) { + switch (type) { case UDP_TUNNEL_TYPE_VXLAN: return "vxlan"; case UDP_TUNNEL_TYPE_GENEVE: @@ -9727,37 +9743,68 @@ static void i40e_sync_udp_filters(struct i40e_pf *pf) static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; - i40e_status ret; + u8 filter_index, type; u16 port; int i; if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state)) return; + /* acquire RTNL to maintain state of flags and port requests */ + rtnl_lock(); + for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { if (pf->pending_udp_bitmap & BIT_ULL(i)) { + struct i40e_udp_port_config *udp_port; + i40e_status ret = 0; + + udp_port = &pf->udp_ports[i]; pf->pending_udp_bitmap &= ~BIT_ULL(i); - port = pf->udp_ports[i].port; + + port = READ_ONCE(udp_port->port); + type = READ_ONCE(udp_port->type); + filter_index = READ_ONCE(udp_port->filter_index); + + /* release RTNL while we wait on AQ command */ + rtnl_unlock(); + if (port) ret = i40e_aq_add_udp_tunnel(hw, port, - pf->udp_ports[i].type, - NULL, NULL); - else - ret = i40e_aq_del_udp_tunnel(hw, i, NULL); + type, + &filter_index, + NULL); + else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED) + ret = i40e_aq_del_udp_tunnel(hw, filter_index, + 
NULL); + + /* reacquire RTNL so we can update filter_index */ + rtnl_lock(); if (ret) { dev_info(&pf->pdev->dev, "%s %s port %d, index %d failed, err %s aq_err %s\n", - i40e_tunnel_name(&pf->udp_ports[i]), + i40e_tunnel_name(type), port ? "add" : "delete", - port, i, + port, + filter_index, i40e_stat_str(&pf->hw, ret), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); - pf->udp_ports[i].port = 0; + if (port) { + /* failed to add, just reset port, + * drop pending bit for any deletion + */ + udp_port->port = 0; + pf->pending_udp_bitmap &= ~BIT_ULL(i); + } + } else if (port) { + /* record filter index on success */ + udp_port->filter_index = filter_index; } } } + + rtnl_unlock(); } /** @@ -10004,7 +10051,7 @@ unlock_pf: /** * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI - * @type: VSI pointer + * @vsi: VSI pointer * @free_qvectors: a bool to specify if q_vectors need to be freed. * * On error: returns error code (negative) @@ -10279,21 +10326,28 @@ static int i40e_init_msix(struct i40e_pf *pf) /* any vectors left over go for VMDq support */ if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { - int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; - int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted); - if (!vectors_left) { pf->num_vmdq_msix = 0; pf->num_vmdq_qps = 0; } else { + int vmdq_vecs_wanted = + pf->num_vmdq_vsis * pf->num_vmdq_qps; + int vmdq_vecs = + min_t(int, vectors_left, vmdq_vecs_wanted); + /* if we're short on vectors for what's desired, we limit * the queues per vmdq. If this is still more than are * available, the user will need to change the number of * queues/vectors used by the PF later with the ethtool * channels command */ - if (vmdq_vecs < vmdq_vecs_wanted) + if (vectors_left < vmdq_vecs_wanted) { pf->num_vmdq_qps = 1; + vmdq_vecs_wanted = pf->num_vmdq_vsis; + vmdq_vecs = min_t(int, + vectors_left, + vmdq_vecs_wanted); + } pf->num_vmdq_msix = pf->num_vmdq_qps; v_budget += vmdq_vecs; @@ -10800,7 +10854,7 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) * @vsi: Pointer to VSI structure * @seed: Buffer to store the keys * @lut: Buffer to store the lookup table entries - * lut_size: Size of buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries * * Returns 0 on success, negative on failure */ @@ -11374,6 +11428,11 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port) u8 i; for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) { + /* Do not report ports with pending deletions as + * being available. + */ + if (!port && (pf->pending_udp_bitmap & BIT_ULL(i))) + continue; if (pf->udp_ports[i].port == port) return i; } @@ -11428,6 +11487,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev, /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].port = port; + pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED; pf->pending_udp_bitmap |= BIT_ULL(next_idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); } @@ -11469,7 +11529,12 @@ static void i40e_udp_tunnel_del(struct net_device *netdev, * and make it pending */ pf->udp_ports[idx].port = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); + + /* Toggle pending bit instead of setting it. This way if we are + * deleting a port that has yet to be added we just clear the pending + * bit and don't have to worry about it. 
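The reworked sync task above snapshots each port entry with READ_ONCE() while holding RTNL, releases the lock for the duration of the slow admin-queue command, and retakes it before storing the firmware-assigned filter_index. A self-contained userspace analog of that lock-drop pattern, with a pthread mutex standing in for RTNL and a stub for the AQ call (all names here are hypothetical):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct udp_port_entry {
	uint16_t port;
	uint8_t filter_index;
};

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* stub for the firmware command; pretends the firmware picked slot 7 */
static int fake_aq_add_tunnel(uint16_t port, uint8_t *filter_index)
{
	(void)port;
	*filter_index = 7;
	return 0;
}

static void sync_one(struct udp_port_entry *e)
{
	uint16_t port;
	uint8_t idx;
	int err;

	pthread_mutex_lock(&cfg_lock);		/* snapshot under the lock */
	port = e->port;
	pthread_mutex_unlock(&cfg_lock);	/* drop it across the slow call */

	err = fake_aq_add_tunnel(port, &idx);

	pthread_mutex_lock(&cfg_lock);		/* retake before writing back */
	if (!err)
		e->filter_index = idx;
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	struct udp_port_entry e = { .port = 4789, .filter_index = 0xff };

	sync_one(&e);
	printf("port %u -> filter index %u\n", e.port, e.filter_index);
	return 0;
}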
+ */ + pf->pending_udp_bitmap ^= BIT_ULL(idx); set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state); return; @@ -11500,6 +11565,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev, * @tb: pointer to array of nladdr (unused) * @dev: the net device pointer * @addr: the MAC address entry being added + * @vid: VLAN ID * @flags: instructions from stack about fdb operation */ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], @@ -11545,6 +11611,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * i40e_ndo_bridge_setlink - Set the hardware bridge mode * @dev: the netdev being configured * @nlh: RTNL message + * @flags: bridge flags * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). If the @@ -11816,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, - .ndo_xdp_flush = i40e_xdp_flush, }; /** @@ -14118,6 +14184,7 @@ static void i40e_remove(struct pci_dev *pdev) /** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct + * @error: the type of PCI error * * Called to warn that something happened and the error handling steps * are in progress. Allows the driver to quiesce things, be ready for diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index ba9687c03795..0299e5bbb902 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
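The XOR above is the whole trick: an add sets the slot's pending bit, and a delete toggles rather than sets it, so a port that was still waiting to be added simply drops out of the work list, while a port already programmed gets queued for removal. A standalone demonstration of both cases, assuming a plain u64 as the pending bitmap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pending = 0;
	const int idx = 3;

	/* Case 1: port added, then deleted before the sync task ran.
	 * The add sets the bit, the delete toggles it back off:
	 * nothing left to do.
	 */
	pending |= 1ULL << idx;	/* add: mark pending        */
	pending ^= 1ULL << idx;	/* del: toggle back off     */
	printf("add-then-del pending: %d\n", !!(pending & (1ULL << idx)));	/* 0 */

	/* Case 2: port was already programmed (no pending add); the
	 * delete toggles the bit on so the sync task removes the filter.
	 */
	pending ^= 1ULL << idx;
	printf("plain del pending:    %d\n", !!(pending & (1ULL << idx)));	/* 1 */
	return 0;
}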
*/ #include "i40e_prototype.h" @@ -1173,6 +1149,7 @@ void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) * i40e_nvmupd_check_wait_event - handle NVM update operation events * @hw: pointer to the hardware structure * @opcode: the event that just happened + * @desc: AdminQ descriptor **/ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, struct i40e_aq_desc *desc) diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 9c3c3b0d3ac4..a07574bff550 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 2ec24188d6e2..3170655cdeb9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 5b47dd1f75a5..35f2866b38c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e.h" #include <linux/ptp_classify.h> @@ -40,9 +16,9 @@ * At 1Gb link, the period is multiplied by 20. (32ns) * 1588 functionality is not supported at 100Mbps. */ -#define I40E_PTP_40GB_INCVAL 0x0199999999ULL -#define I40E_PTP_10GB_INCVAL 0x0333333333ULL -#define I40E_PTP_1GB_INCVAL 0x2000000000ULL +#define I40E_PTP_40GB_INCVAL 0x0199999999ULL +#define I40E_PTP_10GB_INCVAL_MULT 2 +#define I40E_PTP_1GB_INCVAL_MULT 20 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ @@ -130,17 +106,24 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ppb = -ppb; } - smp_mb(); /* Force any pending update before accessing. */ - adj = READ_ONCE(pf->ptp_base_adj); - - freq = adj; + freq = I40E_PTP_40GB_INCVAL; freq *= ppb; diff = div_u64(freq, 1000000000ULL); if (neg_adj) - adj -= diff; + adj = I40E_PTP_40GB_INCVAL - diff; else - adj += diff; + adj = I40E_PTP_40GB_INCVAL + diff; + + /* At some link speeds, the base incval is so large that directly + * multiplying by ppb would result in arithmetic overflow even when + * using a u64. Avoid this by instead calculating the new incval + * always in terms of the 40GbE clock rate and then multiplying by the + * link speed factor afterwards. This does result in slightly lower + * precision at lower link speeds, but it is fairly minor. + */ + smp_mb(); /* Force any pending update before accessing. */ + adj *= READ_ONCE(pf->ptp_adj_mult); wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF); wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32); @@ -334,10 +317,12 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) * This watchdog task is run periodically to make sure that we clear the Tx * timestamp logic if we don't obtain a timestamp in a reasonable amount of * time. It is unexpected in the normal case but if it occurs it results in - * permanently prevent timestamps of future packets + * permanently preventing timestamps of future packets. 
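The i40e_ptp_adjfreq() rework above exists to dodge a u64 overflow: scaling the large 1GbE increment value by ppb directly can exceed 64 bits, so the adjustment is now always computed against the 40GbE base and the link-speed multiplier is applied last. A standalone sketch of the arithmetic, with the constants copied from the patch and a hypothetical helper name:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PTP_40GB_INCVAL     0x0199999999ULL	/* base clock increment */
#define PTP_1GB_INCVAL_MULT 20			/* 1GbE ticks 20x slower */

static uint64_t ptp_adjust(int32_t ppb, uint32_t mult)
{
	uint64_t base = PTP_40GB_INCVAL, freq, diff, adj;
	int neg = ppb < 0;

	if (neg)
		ppb = -ppb;

	freq = base * (uint64_t)ppb;	/* <= ~6.9e18, fits in a u64 */
	diff = freq / 1000000000ULL;
	adj = neg ? base - diff : base + diff;

	return adj * mult;		/* scale up for slower links */
}

int main(void)
{
	/* The old scheme multiplied the 1GbE incval (0x2000000000,
	 * ~1.4e11) by ppb directly; near ppb = 1e9 that product
	 * (~1.4e20) exceeds UINT64_MAX (~1.8e19) and wraps.
	 */
	printf("1GbE adj at +100 ppb: 0x%" PRIx64 "\n",
	       ptp_adjust(100, PTP_1GB_INCVAL_MULT));
	return 0;
}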
**/ void i40e_ptp_tx_hang(struct i40e_pf *pf) { + struct sk_buff *skb; + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) return; @@ -350,9 +335,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) * within a second it is reasonable to assume that we never will. */ if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { - dev_kfree_skb_any(pf->ptp_tx_skb); + skb = pf->ptp_tx_skb; pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + + /* Free the skb after we clear the bitlock */ + dev_kfree_skb_any(skb); pf->tx_hwtstamp_timeouts++; } } @@ -462,6 +450,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) struct i40e_link_status *hw_link_info; struct i40e_hw *hw = &pf->hw; u64 incval; + u32 mult; hw_link_info = &hw->phy.link_info; @@ -469,10 +458,10 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) switch (hw_link_info->link_speed) { case I40E_LINK_SPEED_10GB: - incval = I40E_PTP_10GB_INCVAL; + mult = I40E_PTP_10GB_INCVAL_MULT; break; case I40E_LINK_SPEED_1GB: - incval = I40E_PTP_1GB_INCVAL; + mult = I40E_PTP_1GB_INCVAL_MULT; break; case I40E_LINK_SPEED_100MB: { @@ -483,15 +472,20 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) "1588 functionality is not supported at 100 Mbps. Stopping the PHC.\n"); warn_once++; } - incval = 0; + mult = 0; break; } case I40E_LINK_SPEED_40GB: default: - incval = I40E_PTP_40GB_INCVAL; + mult = 1; break; } + /* The increment value is calculated by taking the base 40GbE incvalue + * and multiplying it by a factor based on the link speed. + */ + incval = I40E_PTP_40GB_INCVAL * mult; + /* Write the new increment value into the increment register. The * hardware will not update the clock until both registers have been * written. @@ -500,14 +494,14 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - WRITE_ONCE(pf->ptp_base_adj, incval); + WRITE_ONCE(pf->ptp_adj_mult, mult); smp_mb(); /* Force the above update. */ } /** * i40e_ptp_get_ts_config - ioctl interface to read the HW timestamping * @pf: Board private structure - * @ifreq: ioctl data + * @ifr: ioctl data * * Obtain the current hardware timestamping settigs as requested. To do this, * keep a shadow copy of the timestamp settings rather than attempting to @@ -651,7 +645,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, /** * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping * @pf: Board private structure - * @ifreq: ioctl data + * @ifr: ioctl data * * Respond to the user filter requests and make the appropriate hardware * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping @@ -805,9 +799,11 @@ void i40e_ptp_stop(struct i40e_pf *pf) pf->ptp_rx = false; if (pf->ptp_tx_skb) { - dev_kfree_skb_any(pf->ptp_tx_skb); + struct sk_buff *skb = pf->ptp_tx_skb; + pf->ptp_tx_skb = NULL; clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + dev_kfree_skb_any(skb); } if (pf->ptp_clock) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index b3e206e49cc2..52e3680c57f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. 
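Both skb-freeing hunks above follow the same ordering: detach the pointer, release the __I40E_PTP_TX_IN_PROGRESS bit lock, and only then free the buffer, keeping the (potentially slow) free outside the section the bit protects. A userspace analog using a C11 atomic flag in place of the bit lock; every name below is illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_flag tx_in_progress = ATOMIC_FLAG_INIT;
static void *ptp_tx_skb;

static void timeout_handler(void)
{
	void *skb = ptp_tx_skb;	/* detach while still owning the flag */

	ptp_tx_skb = NULL;
	atomic_flag_clear(&tx_in_progress);	/* release the "bit lock" */
	free(skb);				/* then free, outside it  */
}

int main(void)
{
	while (atomic_flag_test_and_set(&tx_in_progress))
		;			/* acquire the timestamp slot */
	ptp_tx_skb = malloc(64);	/* pretend to stash an skb    */
	timeout_handler();
	puts("slot released before free");
	return 0;
}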
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h index 10c86f63dc52..77be0702d07c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_status.h +++ b/drivers/net/ethernet/intel/i40e/i40e_status.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index 410ba13bcf21..424f02077e2e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel(R) 40-10 Gigabit Ethernet Connection Network Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Modeled on trace-events-sample.h */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f174c72480ab..8ffb7454e67c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include <linux/prefetch.h> #include <net/busy_poll.h> @@ -495,7 +471,7 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, /** * i40e_add_del_fdir - Build raw packets to add/del fdir filter * @vsi: pointer to the targeted VSI - * @cmd: command to get or set RX flow classification rules + * @input: filter to add or delete * @add: true adds a filter, false removes it * **/ @@ -638,7 +614,7 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring, if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) kfree(tx_buffer->raw_buf); else if (ring_is_xdp(ring)) - page_frag_free(tx_buffer->raw_buf); + xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); if (dma_unmap_len(tx_buffer, len)) @@ -713,7 +689,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed - * @tx_ring: the ring of descriptors + * @ring: the ring of descriptors * @in_sw: use SW variables * * Since there is no access to the ring head register @@ -841,7 +817,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, /* free the skb/XDP data */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buf->raw_buf); + xdp_return_frame(tx_buf->xdpf); else napi_consume_skb(tx_buf->skb, napi_budget); @@ -1795,6 +1771,8 @@ static inline int i40e_ptype_to_htype(u8 ptype) * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor + * @skb: skb currently being received and modified + * @rx_ptype: Rx packet type **/ static inline void i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc, @@ -2054,6 +2032,21 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, #if L1_CACHE_BYTES < 128 prefetch(xdp->data + L1_CACHE_BYTES); #endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags <dev> legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via i40e_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For i40e_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then lets also + * add xdp->data_meta handling here. + */ /* allocate a skb to store the frags */ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, @@ -2105,19 +2098,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, struct i40e_rx_buffer *rx_buffer, struct xdp_buff *xdp) { - unsigned int size = xdp->data_end - xdp->data; + unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; #else unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(I40E_SKB_PAD + size); + SKB_DATA_ALIGN(I40E_SKB_PAD + + (xdp->data_end - + xdp->data_hard_start)); #endif struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(xdp->data); + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points exactly as xdp->data, otherwise we + * likely have a consumer accessing first few bytes of meta + * data, and then actual data. 
+ */ + prefetch(xdp->data_meta); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data + L1_CACHE_BYTES); + prefetch(xdp->data_meta + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ skb = build_skb(xdp->data_hard_start, truesize); @@ -2125,8 +2124,10 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, return NULL; /* update pointers within the skb to store the data */ - skb_reserve(skb, I40E_SKB_PAD); - __skb_put(skb, size); + skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start)); + __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); /* buffer is used by skb, update page_offset */ #if (PAGE_SIZE < 8192) @@ -2203,9 +2204,20 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, #define I40E_XDP_CONSUMED 1 #define I40E_XDP_TX 2 -static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, +static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); +static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, + struct i40e_ring *xdp_ring) +{ + struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); + + if (unlikely(!xdpf)) + return I40E_XDP_CONSUMED; + + return i40e_xmit_xdp_ring(xdpf, xdp_ring); +} + /** * i40e_run_xdp - run an XDP program * @rx_ring: Rx ring being processed @@ -2225,13 +2237,15 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, if (!xdp_prog) goto xdp_out; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - result = i40e_xmit_xdp_ring(xdp, xdp_ring); + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); @@ -2350,7 +2364,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; - xdp_set_data_meta_invalid(&xdp); + xdp.data_meta = xdp.data; xdp.data_hard_start = xdp.data - i40e_rx_offset(rx_ring); xdp.data_end = xdp.data + size; @@ -3478,13 +3492,13 @@ dma_error: * @xdp: data to transmit * @xdp_ring: XDP Tx ring **/ -static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, +static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring) { - u32 size = xdp->data_end - xdp->data; u16 i = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; + u32 size = xdpf->len; dma_addr_t dma; if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { @@ -3492,14 +3506,14 @@ static int i40e_xmit_xdp_ring(struct xdp_buff *xdp, return I40E_XDP_CONSUMED; } - dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE); + dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE); if (dma_mapping_error(xdp_ring->dev, dma)) return I40E_XDP_CONSUMED; tx_bi = &xdp_ring->tx_bi[i]; tx_bi->bytecount = size; tx_bi->gso_segs = 1; - tx_bi->raw_buf = xdp->data; + tx_bi->xdpf = xdpf; /* record length, and DMA address */ dma_unmap_len_set(tx_bi, len, size); @@ -3673,14 +3687,21 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) * @dev: netdev * @xdp: XDP buffer * - * Returns Zero if sent, else an error code + * Returns number of frames successfully sent. Frames that fail are + * free'ed via XDP return API. + * + * For error cases, a negative errno code is returned and no-frames + * are transmitted (caller must handle freeing frames). 
**/ -int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) { struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; - int err; + struct i40e_ring *xdp_ring; + int drops = 0; + int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; @@ -3688,28 +3709,24 @@ int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) return -ENXIO; - err = i40e_xmit_xdp_ring(xdp, vsi->xdp_rings[queue_index]); - if (err != I40E_XDP_TX) - return -ENOSPC; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; - return 0; -} + xdp_ring = vsi->xdp_rings[queue_index]; -/** - * i40e_xdp_flush - Implements ndo_xdp_flush - * @dev: netdev - **/ -void i40e_xdp_flush(struct net_device *dev) -{ - struct i40e_netdev_priv *np = netdev_priv(dev); - unsigned int queue_index = smp_processor_id(); - struct i40e_vsi *vsi = np->vsi; + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; - if (test_bit(__I40E_VSI_DOWN, vsi->state)) - return; + err = i40e_xmit_xdp_ring(xdpf, xdp_ring); + if (err != I40E_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) - return; + if (unlikely(flags & XDP_XMIT_FLUSH)) + i40e_xdp_ring_update_tail(xdp_ring); - i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]); + return n - drops; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3043483ec426..bb04f6a731fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
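With the rewrite above, ndo_xdp_xmit() moves from one xdp_buff at a time plus a separate ndo_xdp_flush() to a batched call: the driver transmits what it can, frees rejected frames itself via xdp_return_frame_rx_napi(), bumps the tail only when XDP_XMIT_FLUSH is set, and reports how many frames went out. A userspace mock of that contract from the caller's side; nothing below is real kernel API:

#include <stdio.h>

#define MOCK_XDP_XMIT_FLUSH (1U << 0)

struct mock_frame { int id; };

static int mock_xdp_xmit(int n, struct mock_frame **frames, unsigned int flags)
{
	int ring_space = 2, sent = 0;

	for (int i = 0; i < n; i++) {
		if (sent < ring_space)
			sent++;	/* queued on the TX ring */
		else
			/* the driver frees the frame itself here,
			 * standing in for xdp_return_frame_rx_napi()
			 */
			printf("dropping frame %d\n", frames[i]->id);
	}
	if (flags & MOCK_XDP_XMIT_FLUSH)
		printf("tail bump after batch\n");
	return sent;	/* caller derives drops as n - sent */
}

int main(void)
{
	struct mock_frame a = {0}, b = {1}, c = {2};
	struct mock_frame *batch[] = { &a, &b, &c };
	int sent = mock_xdp_xmit(3, batch, MOCK_XDP_XMIT_FLUSH);

	printf("sent %d of 3\n", sent);
	return 0;
}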
*/ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ @@ -306,6 +282,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) struct i40e_tx_buffer { struct i40e_tx_desc *next_to_watch; union { + struct xdp_frame *xdpf; struct sk_buff *skb; void *raw_buf; }; @@ -510,8 +487,8 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); void i40e_detect_recover_hung(struct i40e_vsi *vsi); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); -int i40e_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp); -void i40e_xdp_flush(struct net_device *dev); +int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index bfb80092b352..7df969c59855 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
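The i40e_txrx.h hunk above adds struct xdp_frame *xdpf to the union inside struct i40e_tx_buffer, and the cleanup paths earlier in the patch dispatch on the ring type to pick the matching release routine. A compact standalone sketch of that tagged-union bookkeeping, with stand-in types and free() in place of xdp_return_frame()/napi_consume_skb():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_tx_buffer {
	union {
		void *skb;	/* normal TX: socket buffer   */
		void *xdpf;	/* XDP TX: xdp_frame pointer  */
	};
};

static void clean_tx_slot(struct mock_tx_buffer *b, bool ring_is_xdp)
{
	if (ring_is_xdp)
		free(b->xdpf);	/* stands in for xdp_return_frame()  */
	else
		free(b->skb);	/* stands in for napi_consume_skb()  */
}

int main(void)
{
	struct mock_tx_buffer slot = { .xdpf = malloc(32) };

	clean_tx_slot(&slot, true);
	puts("XDP slot reclaimed");
	return 0;
}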
*/ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ @@ -1318,7 +1294,8 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 @@ -1337,6 +1314,8 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B #define I40E_SR_OCP_ENABLED BIT(15) @@ -1454,7 +1433,8 @@ enum i40e_reset_type { }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0xD +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 35173cbe80f7..c6d24eaede18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include "i40e.h" @@ -32,8 +8,8 @@ /** * i40e_vc_vf_broadcast * @pf: pointer to the PF structure - * @opcode: operation code - * @retval: return value + * @v_opcode: operation code + * @v_retval: return value * @msg: pointer to the msg buffer * @msglen: msg length * @@ -1663,6 +1639,7 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, /** * i40e_vc_get_version_msg * @vf: pointer to the VF info + * @msg: pointer to the msg buffer * * called from the VF to request the API version used by the PF **/ @@ -1706,7 +1683,6 @@ static void i40e_del_qch(struct i40e_vf *vf) * i40e_vc_get_vf_resources_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to request its resources **/ @@ -1830,8 +1806,6 @@ err: /** * i40e_vc_reset_vf_msg * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * @msglen: msg length * * called from the VF to reset itself, * unlike other virtchnl messages, PF driver @@ -2180,6 +2154,51 @@ error_param: } /** + * i40e_ctrl_vf_tx_rings + * @vsi: the SRIOV VSI being configured + * @q_map: bit map of the queues to be enabled + * @enable: start or stop the queue + **/ +static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int ret = 0; + u16 q_id; + + for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { + ret = i40e_control_wait_tx_q(vsi->seid, pf, + vsi->base_queue + q_id, + false /*is xdp*/, enable); + if (ret) + break; + } + return ret; +} + +/** + * i40e_ctrl_vf_rx_rings + * @vsi: the SRIOV VSI being configured + * @q_map: bit map of the queues to be enabled + * @enable: start or stop the queue + **/ +static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int ret = 0; + u16 q_id; + + for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) { + ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id, + enable); + if (ret) + break; + } + return ret; +} + +/** * i40e_vc_enable_queues_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2211,8 +2230,17 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx])) + /* Use the queue bit map sent by the VF */ + if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, + true)) { aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } + if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, + true)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } /* need to start the rings for additional ADq VSI's as well */ if (vf->adq_enabled) { @@ -2260,8 +2288,17 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]); - + /* Use the queue bit map sent by the VF */ + if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues, + false)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } + if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues, + false)) { + aq_ret = I40E_ERR_TIMEOUT; + goto error_param; + } error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, @@ -3556,15 +3593,16 @@ err: * i40e_vc_process_vf_msg * @pf: pointer to the PF structure * @vf_id: source VF id + * @v_opcode: operation code + * @v_retval: unused return value code * @msg: pointer to the msg buffer * @msglen: msg length - * @msghndl: msg handle 
* * called from the common aeq/arq handler to * process request from VF **/ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) + u32 __always_unused v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; @@ -4015,7 +4053,8 @@ error_pvid: * i40e_ndo_set_vf_bw * @netdev: network interface device structure * @vf_id: VF identifier - * @tx_rate: Tx rate + * @min_tx_rate: Minimum Tx rate + * @max_tx_rate: Maximum Tx rate * * configure VF Tx rate **/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 57f727bb9e36..bf67d62e2b5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_VIRTCHNL_PF_H_ #define _I40E_VIRTCHNL_PF_H_ diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile index 1e89c5487676..3c5c6e962280 100644 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ b/drivers/net/ethernet/intel/i40evf/Makefile @@ -1,29 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver -# Copyright(c) 2013 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see <http://www.gnu.org/licenses/>. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ +# Copyright(c) 2013 - 2018 Intel Corporation. # ## Makefile for the Intel(R) 40GbE VF driver diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 6fd677efa9da..c355120dfdfd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_status.h" #include "i40e_type.h" diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index a7137c165256..1f264b9b6805 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ADMINQ_H_ #define _I40E_ADMINQ_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 439e71882049..aa81e87cd471 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_ADMINQ_CMD_H_ #define _I40E_ADMINQ_CMD_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h index 7e0fddd8af36..cb8689222c8b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_ALLOC_H_ #define _I40E_ALLOC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 67140cdbcd7a..9cef54971312 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40e_type.h" #include "i40e_adminq.h" @@ -1255,6 +1231,7 @@ i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes + * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ enum diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index 352dd3f3eb6a..f300bf271824 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_DEVIDS_H_ #define _I40E_DEVIDS_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h index 7432596164f4..1c78de838857 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_HMC_H_ #define _I40E_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h index ddac0e4908d3..82b00f70a632 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_LAN_HMC_H_ #define _I40E_LAN_HMC_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 8668ad6c1a65..3ddddb46455b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index 72501bd0f1a9..a358f4b9d5aa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_PROTOTYPE_H_ #define _I40E_PROTOTYPE_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h index c9c935659758..49e1f57d99cc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h index 0d7993ecb99a..77be0702d07c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #ifndef _I40E_STATUS_H_ #define _I40E_STATUS_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/i40evf/i40e_trace.h index ece01dd12a3c..d7a4e68820a8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_trace.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel(R) 40-10 Gigabit Ethernet Virtual Function Driver - * Copyright(c) 2013 - 2017 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* Modeled on trace-events-sample.h */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 12bd937861e7..a9730711e257 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include <linux/prefetch.h> #include <net/busy_poll.h> @@ -129,7 +105,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) /** * i40evf_get_tx_pending - how many Tx descriptors not processed - * @tx_ring: the ring of descriptors + * @ring: the ring of descriptors * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register @@ -1070,6 +1046,8 @@ static inline int i40e_ptype_to_htype(u8 ptype) * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor + * @skb: skb currently being received and modified + * @rx_ptype: Rx packet type **/ static inline void i40e_rx_hash(struct i40e_ring *ring, union i40e_rx_desc *rx_desc, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 5790897eae2e..3b5a63b3236e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 449de4b0058e..094387db3c11 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
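The i40e_txrx.c changes above are pure kernel-doc repairs: the parameter lines are brought back in sync with the actual signatures (@ring for the stale @tx_ring, plus the previously undocumented @skb and @rx_ptype). A minimal sketch of the convention these hunks restore, using a hypothetical helper rather than code from this patch:

/**
 * example_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 *
 * Each parameter must appear as an @name line, in signature order;
 * scripts/kernel-doc warns on any mismatch between comment and code.
 **/
static inline void example_rx_hash(struct i40e_ring *ring,
				   union i40e_rx_desc *rx_desc,
				   struct sk_buff *skb, u8 rx_ptype)
{
	/* body elided; only the comment shape matters here */
}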
- * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #ifndef _I40E_TYPE_H_ #define _I40E_TYPE_H_ @@ -1257,7 +1233,8 @@ struct i40e_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 @@ -1273,6 +1250,9 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) +#define I40E_PTR_TYPE BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 @@ -1386,6 +1366,10 @@ enum i40e_reset_type { I40E_RESET_EMPR = 3, }; +/* IEEE 802.1AB LLDP Agent Variables from NVM */ +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 + /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 3a7a1e77bf39..96e537a35000 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
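The i40e_type.h hunk above corrects I40E_SR_EMP_MODULE_PTR to 0x48 (keeping 0x0F as I40E_EMP_MODULE_PTR) and adds bank-valid, map-structure-type, and pointer-type bits along with the LLDP configuration pointer words. The patch only introduces the defines; one plausible, userspace-testable reading of the new I40E_PTR_TYPE bit (an assumption, not something the hunk states) is that a set bit marks the pointer as counting 4KB sectors instead of 16-bit words:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)        (1u << (n))
#define I40E_PTR_TYPE BIT(15)	/* value taken from the hunk above */

/* Hypothetical decode: with the type bit set, treat the pointer as a
 * count of 4KB sectors, otherwise as a count of 16-bit words. This
 * interpretation is assumed, not confirmed by this patch. */
static uint32_t nvm_ptr_to_byte_offset(uint16_t ptr)
{
	if (ptr & I40E_PTR_TYPE)
		return (uint32_t)(ptr & ~I40E_PTR_TYPE) * 4096u;
	return (uint32_t)ptr * 2u;
}

int main(void)
{
	printf("0x0010 -> %u bytes\n", nvm_ptr_to_byte_offset(0x0010));
	printf("0x8010 -> %u bytes\n", nvm_ptr_to_byte_offset(0x8010));
	return 0;
}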
*/ #ifndef _I40EVF_H_ #define _I40EVF_H_ @@ -105,7 +81,6 @@ struct i40e_vsi { #define I40E_TX_DESC(R, i) (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) -#define MAX_QUEUES 16 #define I40EVF_MAX_REQ_QUEUES 4 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c index da60ce12b33d..3cc9d60d0d72 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -1,4 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + #include <linux/list.h> #include <linux/errno.h> @@ -176,7 +178,6 @@ void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset) /** * i40evf_client_add_instance - add a client instance to the instance list * @adapter: pointer to the board struct - * @client: pointer to a client struct in the client list. * * Returns cinst ptr on success, NULL on failure **/ @@ -234,7 +235,6 @@ out: /** * i40evf_client_del_instance - removes a client instance from the list * @adapter: pointer to the board struct - * @client: pointer to the client struct * **/ static @@ -438,7 +438,7 @@ static u32 i40evf_client_virtchnl_send(struct i40e_info *ldev, * i40evf_client_setup_qvlist - send a message to the PF to setup iwarp qv map * @ldev: pointer to L2 context. * @client: Client pointer. - * @qv_info: queue and vector list + * @qvlist_info: queue and vector list * * Return 0 on success or < 0 on error **/ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/i40evf/i40evf_client.h index 15a10da5bd4a..5585f362048a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _I40E_CLIENT_H_ -#define _I40E_CLIENT_H_ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40EVF_CLIENT_H_ +#define _I40EVF_CLIENT_H_ #define I40EVF_CLIENT_STR_LENGTH 10 @@ -164,4 +166,4 @@ struct i40e_client { /* used by clients */ int i40evf_register_client(struct i40e_client *client); int i40evf_unregister_client(struct i40e_client *client); -#endif /* _I40E_CLIENT_H_ */ +#endif /* _I40EVF_CLIENT_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index dc4cde274fb8..69efe0aec76a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. 
- * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ /* ethtool support for i40evf */ #include "i40evf.h" @@ -226,7 +202,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) /** * i40evf_get_priv_flags - report device private flags - * @dev: network interface device structure + * @netdev: network interface device structure * * The get string set count and the string set should be matched for each * flag returned. Add new strings for each flag to the i40e_gstrings_priv_flags @@ -253,7 +229,7 @@ static u32 i40evf_get_priv_flags(struct net_device *netdev) /** * i40evf_set_priv_flags - set private flags - * @dev: network interface device structure + * @netdev: network interface device structure * @flags: bit flags to be set **/ static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags) @@ -627,6 +603,7 @@ static int i40evf_set_per_queue_coalesce(struct net_device *netdev, * i40evf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations * * Returns Success if the command is supported. **/ @@ -746,6 +723,7 @@ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function in use * * Reads the indirection table directly from the hardware. Always returns 0. **/ @@ -774,6 +752,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, * @netdev: network interface device structure * @indir: indirection table * @key: hash key + * @hfunc: hash function to use * * Returns -EINVAL if the table specifies an invalid queue id, otherwise * returns 0 after programming the table. diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5f71532be7f1..a7b87f935411 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E.
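The i40evf_ethtool.c hunks above only touch kernel-doc, documenting the @hfunc argument that the get_rxfh/set_rxfh handlers take. A hedged sketch of the get_rxfh contract those comments describe, assuming the adapter caches its RSS key and lookup table in rss_key/rss_lut fields (illustrative names, not guaranteed to match the driver):

static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* report Toeplitz */
	if (key)
		memcpy(key, adapter->rss_key, adapter->rss_key_size);
	if (indir)
		for (i = 0; i < adapter->rss_lut_size; i++)
			indir[i] = (u32)adapter->rss_lut[i];
	return 0;
}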
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40evf.h" #include "i40e_prototype.h" @@ -473,6 +449,7 @@ static void i40evf_irq_affinity_release(struct kref *ref) {} /** * i40evf_request_traffic_irqs - Initialize MSI-X interrupts * @adapter: board private structure + * @basename: device basename * * Allocates MSI-X vectors for tx and rx handling, and requests * interrupts from the kernel. @@ -705,7 +682,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) f = i40evf_find_vlan(adapter, vlan); if (!f) { - f = kzalloc(sizeof(*f), GFP_ATOMIC); + f = kzalloc(sizeof(*f), GFP_KERNEL); if (!f) goto clearout; @@ -745,6 +722,7 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) /** * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device * @netdev: network device struct + * @proto: unused protocol data * @vid: VLAN tag **/ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, @@ -762,6 +740,7 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev, /** * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device * @netdev: network device struct + * @proto: unused protocol data * @vid: VLAN tag **/ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, @@ -1946,7 +1925,8 @@ continue_reset: * ndo_open() returning, so we can't assume it means all our open * tasks have finished, since we're not holding the rtnl_lock here. */ - running = (adapter->state == __I40EVF_RUNNING); + running = ((adapter->state == __I40EVF_RUNNING) || + (adapter->state == __I40EVF_RESETTING)); if (running) { netif_carrier_off(netdev); @@ -2352,7 +2332,7 @@ static int i40evf_validate_ch_config(struct i40evf_adapter *adapter, total_max_rate += tx_rate; num_qps += mqprio_qopt->qopt.count[i]; } - if (num_qps > MAX_QUEUES) + if (num_qps > I40EVF_MAX_REQ_QUEUES) return -EINVAL; ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate); @@ -3160,7 +3140,7 @@ static int i40evf_set_features(struct net_device *netdev, /** * i40evf_features_check - Validate encapsulated packet conforms to limits * @skb: skb buff - * @netdev: This physical port's netdev + * @dev: This physical port's netdev * @features: Offload features that the stack believes apply **/ static netdev_features_t i40evf_features_check(struct sk_buff *skb, @@ -3378,6 +3358,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + /* Do not turn on offloads when they are requested to be turned off. + * TSO needs minimum 576 bytes to work correctly. 
+ */ + if (netdev->wanted_features) { + if (!(netdev->wanted_features & NETIF_F_TSO) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO; + if (!(netdev->wanted_features & NETIF_F_TSO6) || + netdev->mtu < 576) + netdev->features &= ~NETIF_F_TSO6; + if (!(netdev->wanted_features & NETIF_F_TSO_ECN)) + netdev->features &= ~NETIF_F_TSO_ECN; + if (!(netdev->wanted_features & NETIF_F_GRO)) + netdev->features &= ~NETIF_F_GRO; + if (!(netdev->wanted_features & NETIF_F_GSO)) + netdev->features &= ~NETIF_F_GSO; + } + adapter->vsi.id = adapter->vsi_res->vsi_id; adapter->vsi.back = adapter; @@ -3692,7 +3690,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES); + netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), + I40EVF_MAX_REQ_QUEUES); if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 26a59890532f..565677de5ba3 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - * - * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ #include "i40evf.h" #include "i40e_prototype.h" @@ -179,8 +155,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) /** * i40evf_get_vf_config - * @hw: pointer to the hardware structure - * @len: length of buffer + * @adapter: private adapter structure * * Get VF configuration from PF and populate hw structure. Must be called after * admin queue is initialized. Busy waits until response is received from PF, @@ -423,8 +398,6 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) /** * i40evf_add_ether_addrs * @adapter: adapter structure - * @addrs: the MAC address filters to add (contiguous) - * @count: number of filters * * Request that the PF add one or more addresses to our filters. **/ @@ -497,8 +470,6 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) /** * i40evf_del_ether_addrs * @adapter: adapter structure - * @addrs: the MAC address filters to remove (contiguous) - * @count: number of filtes * * Request that the PF remove one or more addresses from our filters. 
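Two functional changes complete just above: netdev->features is trimmed so that offloads the user already switched off do not silently come back on (TSO/TSO6 additionally require mtu >= 576), and the stray MAX_QUEUES constant is gone so channel validation and alloc_etherdev_mq() share I40EVF_MAX_REQ_QUEUES. The trimming pattern, restated as a compact sketch for a hypothetical driver:

/* Keep an offload only when user space has not requested it off; the
 * TSO variants also need a minimum MTU. Mirrors the hunk above. */
static void example_trim_features(struct net_device *netdev)
{
	netdev_features_t keep = netdev->wanted_features;

	if (!keep)
		return;	/* no user preference recorded yet */

	if (!(keep & NETIF_F_TSO) || netdev->mtu < 576)
		netdev->features &= ~NETIF_F_TSO;
	if (!(keep & NETIF_F_TSO6) || netdev->mtu < 576)
		netdev->features &= ~NETIF_F_TSO6;
	if (!(keep & NETIF_F_GRO))
		netdev->features &= ~NETIF_F_GRO;
}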
**/ @@ -571,8 +542,6 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) /** * i40evf_add_vlans * @adapter: adapter structure - * @vlans: the VLANs to add - * @count: number of VLANs * * Request that the PF add one or more VLAN filters to our VSI. **/ @@ -643,8 +612,6 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) /** * i40evf_del_vlans * @adapter: adapter structure - * @vlans: the VLANs to remove - * @count: number of VLANs * * Request that the PF remove one or more VLAN filters from our VSI. **/ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7dc5f045e969..7541ec2270b3 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1049,7 +1049,9 @@ struct ice_aqc_set_event_mask { * NVM Update commands (indirect 0x0703) */ struct ice_aqc_nvm { - u8 cmd_flags; + __le16 offset_low; + u8 offset_high; + u8 cmd_flags; #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 @@ -1058,12 +1060,11 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) - u8 module_typeid; - __le16 length; + __le16 module_typeid; + __le16 length; #define ICE_AQC_NVM_ERASE_LEN 0xFFFF - __le32 offset; - __le32 addr_high; - __le32 addr_low; + __le32 addr_high; + __le32 addr_low; }; /* Get/Set RSS key (indirect 0x0B04/0x0B02) */ diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index fa7a69ac92b0..92da0a626ce0 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -16,7 +16,7 @@ * Read the NVM using the admin queue commands (0x0701) */ static enum ice_status -ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, +ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, void *data, bool last_command, struct ice_sq_cd *cd) { struct ice_aq_desc desc; @@ -33,8 +33,9 @@ ice_aq_read_nvm(struct ice_hw *hw, u8 module_typeid, u32 offset, u16 length, /* If this is the last command in a series, set the proper flag. */ if (last_command) cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; - cmd->module_typeid = module_typeid; - cmd->offset = cpu_to_le32(offset); + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; cmd->length = cpu_to_le16(length); return ice_aq_send_cmd(hw, &desc, data, length, cd); diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index c48583e98ac1..394c1e0656b9 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 82575 PCI-Express Ethernet Linux driver -# Copyright(c) 1999 - 2014 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. 
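The ice_aqc_nvm rework above changes the wire layout of the NVM read/write descriptor: the 32-bit offset field becomes a 16-bit offset_low plus an 8-bit offset_high at the head of the command, and module_typeid widens to __le16, so ice_aq_read_nvm() must split the up-to-24-bit flash offset across the two fields. A self-contained sketch of that packing (simplified stand-in struct, little-endian host assumed so the cpu_to_le16() conversion is omitted):

#include <stdint.h>
#include <stdio.h>

/* Simplified mirror of the reworked descriptor layout. */
struct aqc_nvm_sketch {
	uint16_t offset_low;	/* offset[15:0]  */
	uint8_t  offset_high;	/* offset[23:16] */
	uint8_t  cmd_flags;
	uint16_t module_typeid;
	uint16_t length;
	uint32_t addr_high;
	uint32_t addr_low;
};

static void pack_offset(struct aqc_nvm_sketch *cmd, uint32_t offset)
{
	cmd->offset_low  = offset & 0xFFFF;		/* low 16 bits */
	cmd->offset_high = (offset >> 16) & 0xFF;	/* next 8 bits */
}

int main(void)
{
	struct aqc_nvm_sketch cmd = { 0 };

	pack_offset(&cmd, 0x012345);
	printf("low=0x%04x high=0x%02x\n", cmd.offset_low, cmd.offset_high);
	return 0;
}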
See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, see <http://www.gnu.org/licenses/>. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS <linux.nics@intel.com> -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82575 PCI-Express ethernet driver # diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index dd9b6cac220d..b13b42e5a1d9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ /* e1000_82575 * e1000_82576 diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index e53ebe97d709..6ad775b1a4c5 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 98534f765e0e..252440a418dc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ @@ -491,6 +470,8 @@ * manageability enabled, allowing us room for 15 multicast addresses. */ #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAH_ASEL_SRC_ADDR 0x00010000 +#define E1000_RAH_QSEL_ENABLE 0x10000000 #define E1000_RAL_MAC_ADDR_LEN 4 #define E1000_RAH_MAC_ADDR_LEN 2 #define E1000_RAH_POOL_MASK 0x03FC0000 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index ff835e1e853d..5d87957b2627 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -1,25 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index 6f548247e6d8..c54ebedca6da 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. 
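e1000_defines.h above adds two receive-address-high (RAH) bits consumed by the igb MAC steering work later in this series: E1000_RAH_ASEL_SRC_ADDR flips an address-table entry from destination- to source-address matching, and E1000_RAH_QSEL_ENABLE tells the hardware to honor the entry's queue-select field. A hypothetical composition using only the values in this hunk (the queue-number shift is not shown here, so it is left out):

#include <stdint.h>

#define E1000_RAH_AV            0x80000000u	/* entry valid */
#define E1000_RAH_ASEL_SRC_ADDR 0x00010000u	/* match source MAC */
#define E1000_RAH_QSEL_ENABLE   0x10000000u	/* steer to selected queue */

/* Hypothetical RAH value for a valid, source-matched, queue-steered
 * entry; a real driver would also OR in the queue number field. */
static uint32_t example_rah_src_steering(void)
{
	return E1000_RAH_AV | E1000_RAH_ASEL_SRC_ADDR | E1000_RAH_QSEL_ENABLE;
}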
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ /* e1000_i210 * e1000_i211 diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h index 56f015ccb206..5c437fdc49ee 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.h +++ b/drivers/net/ethernet/intel/igb/e1000_i210.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_I210_H_ #define _E1000_I210_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 298afa0d9159..79ee0a747260 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
- * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include <linux/if_ether.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 04d80c765aee..6e110f28f922 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index ef42f1689b3b..46debd991bfe 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include "e1000_mbx.h" diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 4f0ecd28354d..178e60ec71d4 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index e4596f151cd4..09f4dcb09632 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -1,25 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include <linux/if_ether.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h index dde68cd54a53..091cddf4ada8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.h +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
- * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 4ec61243da82..2be0e762ec69 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include <linux/if_ether.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 856d2cda0643..5894e4b1d0a8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_PHY_H_ #define _E1000_PHY_H_ diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index e8fa8c6530e0..0ad737d2f289 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 8dbc399b345e..9643b5b3d444 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -1,26 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
*/ /* Linux PRO/1000 Ethernet Driver main header file */ @@ -442,6 +421,8 @@ struct hwmon_buff { enum igb_filter_match_flags { IGB_FILTER_FLAG_ETHER_TYPE = 0x1, IGB_FILTER_FLAG_VLAN_TCI = 0x2, + IGB_FILTER_FLAG_SRC_MAC_ADDR = 0x4, + IGB_FILTER_FLAG_DST_MAC_ADDR = 0x8, }; #define IGB_MAX_RXNFC_FILTERS 16 @@ -456,11 +437,14 @@ struct igb_nfc_input { u8 match_flags; __be16 etype; __be16 vlan_tci; + u8 src_addr[ETH_ALEN]; + u8 dst_addr[ETH_ALEN]; }; struct igb_nfc_filter { struct hlist_node nfc_node; struct igb_nfc_input filter; + unsigned long cookie; u16 etype_reg_index; u16 sw_idx; u16 action; @@ -474,6 +458,8 @@ struct igb_mac_addr { #define IGB_MAC_STATE_DEFAULT 0x1 #define IGB_MAC_STATE_IN_USE 0x2 +#define IGB_MAC_STATE_SRC_ADDR 0x4 +#define IGB_MAC_STATE_QUEUE_STEERING 0x8 /* board specific private data structure */ struct igb_adapter { @@ -598,6 +584,7 @@ struct igb_adapter { /* RX network flow classification support */ struct hlist_head nfc_filter_list; + struct hlist_head cls_flower_list; unsigned int nfc_filter_count; /* lock for RX network flow classification filter */ spinlock_t nfc_lock; @@ -739,4 +726,9 @@ int igb_add_filter(struct igb_adapter *adapter, int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input); +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index e77ba0d5866d..2d798499d35e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. 
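igb.h above extends the RX flow-classification model: two new match_flags bits for source and destination MAC addresses, matching address fields in igb_nfc_input, a cookie for the new cls_flower list, and MAC-table state bits plus the igb_add/del_mac_steering_filter() entry points that igb_ethtool.c below wires up. A small sketch of composing a filter input with the new flags (flag values copied from the hunk; struct trimmed to the fields used):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6
#define IGB_FILTER_FLAG_SRC_MAC_ADDR 0x4
#define IGB_FILTER_FLAG_DST_MAC_ADDR 0x8

struct example_nfc_input {
	uint8_t match_flags;
	uint8_t src_addr[ETH_ALEN];
	uint8_t dst_addr[ETH_ALEN];
};

/* Match on destination MAC only; leaving the other bits clear tells
 * the add/erase paths which fields of the input are meaningful. */
static void example_match_dst(struct example_nfc_input *in,
			      const uint8_t mac[ETH_ALEN])
{
	in->match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR;
	memcpy(in->dst_addr, mac, ETH_ALEN);
}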
*/ /* ethtool support for igb */ @@ -2495,6 +2474,23 @@ static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, fsp->h_ext.vlan_tci = rule->filter.vlan_tci; fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); } + if (rule->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + rule->filter.dst_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + ether_addr_copy(fsp->h_u.ether_spec.h_source, + rule->filter.src_addr); + /* As we only support matching by the full + * mask, return the mask to userspace + */ + eth_broadcast_addr(fsp->m_u.ether_spec.h_source); + } + return 0; } return -EINVAL; @@ -2768,14 +2764,41 @@ static int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) { + struct e1000_hw *hw = &adapter->hw; int err = -EINVAL; + if (hw->mac.type == e1000_i210 && + !(input->filter.match_flags & ~IGB_FILTER_FLAG_SRC_MAC_ADDR)) { + dev_err(&adapter->pdev->dev, + "i210 doesn't support flow classification rules specifying only source addresses.\n"); + return -EOPNOTSUPP; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { err = igb_rxnfc_write_etype_filter(adapter, input); if (err) return err; } + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.dst_addr, + input->action, 0); + err = min_t(int, err, 0); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) { + err = igb_add_mac_steering_filter(adapter, + input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + err = min_t(int, err, 0); + if (err) + return err; + } + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) err = igb_rxnfc_write_vlan_prio_filter(adapter, input); @@ -2824,6 +2847,15 @@ int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) igb_clear_vlan_prio_filter(adapter, ntohs(input->filter.vlan_tci)); + if (input->filter.match_flags & IGB_FILTER_FLAG_SRC_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.src_addr, + input->action, + IGB_MAC_STATE_SRC_ADDR); + + if (input->filter.match_flags & IGB_FILTER_FLAG_DST_MAC_ADDR) + igb_del_mac_steering_filter(adapter, input->filter.dst_addr, + input->action, 0); + return 0; } @@ -2865,7 +2897,7 @@ static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, /* add filter to the list */ if (parent) - hlist_add_behind(&parent->nfc_node, &input->nfc_node); + hlist_add_behind(&input->nfc_node, &parent->nfc_node); else hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); @@ -2905,10 +2937,6 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) return -EINVAL; - if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && - fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) - return -EINVAL; - input = kzalloc(sizeof(*input), GFP_KERNEL); if (!input) return -ENOMEM; @@ -2918,6 +2946,20 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; } + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) { + input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, + 
fsp->h_u.ether_spec.h_source); + } + + /* Only support matching addresses by the full mask */ + if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) { + input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, + fsp->h_u.ether_spec.h_dest); + } + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { err = -EINVAL; diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index bebe43b3a836..3b83747b2700 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #include "igb.h" #include "e1000_82575.h" diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cce7ada89255..c33821d2afb3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1,26 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - */ +/* Copyright(c) 2007 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -36,6 +15,7 @@ #include <net/checksum.h> #include <net/ip6_checksum.h> #include <net/pkt_sched.h> +#include <net/pkt_cls.h> #include <linux/net_tstamp.h> #include <linux/mii.h> #include <linux/ethtool.h> @@ -2078,6 +2058,7 @@ int igb_up(struct igb_adapter *adapter) igb_assign_vector(adapter->q_vector[0], 0); /* Clear any pending interrupts. 
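* The TSICR read added below is not redundant with the ICR read: the
* time-sync interrupt cause register is read-to-clear, so flushing it
* here discards any time-sync event latched while interrupts were off
* instead of letting it fire as soon as igb_irq_enable() runs.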
*/ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); @@ -2513,6 +2494,250 @@ static int igb_offload_cbs(struct igb_adapter *adapter, return 0; } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_PRIO_FULL_MASK (0x07) + +static int igb_parse_cls_flower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *f, + int traffic_class, + struct igb_nfc_filter *input) +{ + struct netlink_ext_ack *extack = f->common.extack; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported"); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + if (!is_zero_ether_addr(mask->dst)) { + if (!is_broadcast_ether_addr(mask->dst)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_DST_MAC_ADDR; + ether_addr_copy(input->filter.dst_addr, key->dst); + } + + if (!is_zero_ether_addr(mask->src)) { + if (!is_broadcast_ether_addr(mask->src)) { + NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); + return -EINVAL; + } + + input->filter.match_flags |= + IGB_FILTER_FLAG_SRC_MAC_ADDR; + ether_addr_copy(input->filter.src_addr, key->src); + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + + if (mask->n_proto) { + if (mask->n_proto != ETHER_TYPE_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; + input->filter.etype = key->n_proto; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key, *mask; + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + + if (mask->vlan_priority) { + if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); + return -EINVAL; + } + + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + input->filter.vlan_tci = key->vlan_priority; + } + } + + input->action = traffic_class; + input->cookie = f->cookie; + + return 0; +} + +static int igb_configure_clsflower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + struct netlink_ext_ack *extack = cls_flower->common.extack; + struct igb_nfc_filter *filter, *f; + int err, tc; + + tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid); + if (tc < 0) { + NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class"); + return -EINVAL; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + err = igb_parse_cls_flower(adapter, cls_flower, tc, filter); + if (err < 0) + goto err_parse; + + 
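+ /* Both filter lists are scanned under nfc_lock before the hardware is
+ * touched: a rule that duplicates an existing ethtool or cls_flower
+ * entry is rejected with -EEXIST rather than claiming a second RAR
+ * slot. A hypothetical command sequence that exercises this path,
+ * assuming eth0 is an igb port with offloaded traffic classes already
+ * configured (e.g. via mqprio):
+ *   tc qdisc add dev eth0 clsact
+ *   tc filter add dev eth0 ingress flower \
+ *     dst_mac 00:11:22:33:44:55 skip_sw hw_tc 1
+ */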
spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in ethtool"); + goto err_locked; + } + } + + hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) { + if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) { + err = -EEXIST; + NL_SET_ERR_MSG_MOD(extack, + "This filter is already set in cls_flower"); + goto err_locked; + } + } + + err = igb_add_filter(adapter, filter); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter"); + goto err_locked; + } + + hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list); + + spin_unlock(&adapter->nfc_lock); + + return 0; + +err_locked: + spin_unlock(&adapter->nfc_lock); + +err_parse: + kfree(filter); + + return err; +} + +static int igb_delete_clsflower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + struct igb_nfc_filter *filter; + int err; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node) + if (filter->cookie == cls_flower->cookie) + break; + + if (!filter) { + err = -ENOENT; + goto out; + } + + err = igb_erase_filter(adapter, filter); + if (err < 0) + goto out; + + hlist_del(&filter->nfc_node); + kfree(filter); + +out: + spin_unlock(&adapter->nfc_lock); + + return err; +} + +static int igb_setup_tc_cls_flower(struct igb_adapter *adapter, + struct tc_cls_flower_offload *cls_flower) +{ + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return igb_configure_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_DESTROY: + return igb_delete_clsflower(adapter, cls_flower); + case TC_CLSFLOWER_STATS: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct igb_adapter *adapter = cb_priv; + + if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return igb_setup_tc_cls_flower(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int igb_setup_tc_block(struct igb_adapter *adapter, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, igb_setup_tc_block_cb, + adapter, adapter); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb, + adapter); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -2521,6 +2746,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); + case TC_SETUP_BLOCK: + return igb_setup_tc_block(adapter, type_data); default: return -EOPNOTSUPP; @@ -2822,6 +3049,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CRC; + if (hw->mac.type >= e1000_i350) + netdev->features |= NETIF_F_HW_TC; + #define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ NETIF_F_GSO_GRE_CSUM | \ NETIF_F_GSO_IPXIP4 | \ @@ -3636,6 +3866,7 @@ static int __igb_open(struct net_device *netdev, bool resuming) napi_enable(&(adapter->q_vector[i]->napi)); /* Clear any pending 
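time-sync (TSICR) and general (ICR)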
interrupts. */ + rd32(E1000_TSICR); rd32(E1000_ICR); igb_irq_enable(adapter); @@ -3824,11 +4055,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, u64 tdba = ring->dma; int reg_idx = ring->reg_idx; - /* disable the queue */ - wr32(E1000_TXDCTL(reg_idx), 0); - wrfl(); - mdelay(10); - wr32(E1000_TDLEN(reg_idx), ring->count * sizeof(union e1000_adv_tx_desc)); wr32(E1000_TDBAL(reg_idx), @@ -3859,8 +4085,16 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, **/ static void igb_configure_tx(struct igb_adapter *adapter) { + struct e1000_hw *hw = &adapter->hw; int i; + /* disable the queues */ + for (i = 0; i < adapter->num_tx_queues; i++) + wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0); + + wrfl(); + usleep_range(10000, 20000); + for (i = 0; i < adapter->num_tx_queues; i++) igb_configure_tx_ring(adapter, adapter->tx_ring[i]); } @@ -6856,8 +7090,35 @@ static void igb_set_default_mac_filter(struct igb_adapter *adapter) igb_rar_set_index(adapter, 0); } -static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, - const u8 queue) +/* If the filter to be added and an already existing filter express + * the same address and address type, it should be possible to only + * override the other configurations, for example the queue to steer + * traffic. + */ +static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry, + const u8 *addr, const u8 flags) +{ + if (!(entry->state & IGB_MAC_STATE_IN_USE)) + return true; + + if ((entry->state & IGB_MAC_STATE_SRC_ADDR) != + (flags & IGB_MAC_STATE_SRC_ADDR)) + return false; + + if (!ether_addr_equal(addr, entry->addr)) + return false; + + return true; +} + +/* Add a MAC filter for 'addr' directing matching traffic to 'queue', + * 'flags' is used to indicate what kind of match is made, match is by + * default for the destination address, if matching by source address + * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used. + */ +static int igb_add_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) { struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - @@ -6872,12 +7133,13 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, * addresses. */ for (i = 0; i < rar_entries; i++) { - if (adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) + if (!igb_mac_entry_can_be_used(&adapter->mac_table[i], + addr, flags)) continue; ether_addr_copy(adapter->mac_table[i].addr, addr); adapter->mac_table[i].queue = queue; - adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags; igb_rar_set_index(adapter, i); return i; @@ -6886,9 +7148,22 @@ static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOSPC; } -static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, +static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr, const u8 queue) { + return igb_add_mac_filter_flags(adapter, addr, queue, 0); +} + +/* Remove a MAC filter for 'addr' directing matching traffic to + * 'queue', 'flags' is used to indicate what kind of match need to be + * removed, match is by default for the destination address, if + * matching by source address is to be removed the flag + * IGB_MAC_STATE_SRC_ADDR can be used. 
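+ * The bits in 'flags' must match the ones used when the filter was
+ * added: the lookup below requires every bit of 'flags' to be set in
+ * the table entry, so a steering filter installed with
+ * IGB_MAC_STATE_QUEUE_STEERING | IGB_MAC_STATE_SRC_ADDR is only found
+ * when the same combination is passed back in.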
+ */ +static int igb_del_mac_filter_flags(struct igb_adapter *adapter, + const u8 *addr, const u8 queue, + const u8 flags) +{ struct e1000_hw *hw = &adapter->hw; int rar_entries = hw->mac.rar_entry_count - adapter->vfs_allocated_count; @@ -6904,14 +7179,26 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, for (i = 0; i < rar_entries; i++) { if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE)) continue; + if ((adapter->mac_table[i].state & flags) != flags) + continue; if (adapter->mac_table[i].queue != queue) continue; if (!ether_addr_equal(adapter->mac_table[i].addr, addr)) continue; - adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); - adapter->mac_table[i].queue = 0; + /* When a filter for the default address is "deleted", + * we return it to its initial configuration + */ + if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) { + adapter->mac_table[i].state = + IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE; + adapter->mac_table[i].queue = + adapter->vfs_allocated_count; + } else { + adapter->mac_table[i].state = 0; + adapter->mac_table[i].queue = 0; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + } igb_rar_set_index(adapter, i); return 0; @@ -6920,6 +7207,34 @@ static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, return -ENOENT; } +static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr, + const u8 queue) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, 0); +} + +int igb_add_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + struct e1000_hw *hw = &adapter->hw; + + /* In theory, this should be supported on 82575 as well, but + * that part wasn't easily accessible during development. 
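+ * Until that is verified, every other MAC type is rejected here, so
+ * ethtool and cls_flower rules that match on MAC addresses report
+ * -EOPNOTSUPP unless the adapter is an i210.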
+ */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + return igb_add_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + +int igb_del_mac_steering_filter(struct igb_adapter *adapter, + const u8 *addr, u8 queue, u8 flags) +{ + return igb_del_mac_filter_flags(adapter, addr, queue, + IGB_MAC_STATE_QUEUE_STEERING | flags); +} + static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -8763,12 +9078,24 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) if (is_valid_ether_addr(addr)) rar_high |= E1000_RAH_AV; - if (hw->mac.type == e1000_82575) + if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR) + rar_high |= E1000_RAH_ASEL_SRC_ADDR; + + switch (hw->mac.type) { + case e1000_82575: + case e1000_i210: + if (adapter->mac_table[index].state & + IGB_MAC_STATE_QUEUE_STEERING) + rar_high |= E1000_RAH_QSEL_ENABLE; + rar_high |= E1000_RAH_POOL_1 * adapter->mac_table[index].queue; - else + break; + default: rar_high |= E1000_RAH_POOL_1 << adapter->mac_table[index].queue; + break; + } } wr32(E1000_RAL(index), rar_low); @@ -9206,6 +9533,9 @@ static void igb_nfc_filter_exit(struct igb_adapter *adapter) hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) igb_erase_filter(adapter, rule); + hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node) + igb_erase_filter(adapter, rule); + spin_unlock(&adapter->nfc_lock); } diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 7454b9895a65..9f4d700e09df 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1,21 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -/* PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580 - * - * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see <http://www.gnu.org/licenses/>. - */ +/* Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com> */ + #include <linux/module.h> #include <linux/device.h> #include <linux/pci.h> diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index efe29dae384a..afd3e36eae75 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel(R) 82576 Virtual Function Linux driver -# Copyright(c) 2009 - 2012 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. 
-# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 2009 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82576 VF ethernet driver # diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index 04bcfec0641b..4437f832412d 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index ca39e3cccaeb..3ae358b35227 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ /* ethtool support for igbvf */ diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index f5bf248e22eb..eee26a3be90b 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ /* Linux PRO/1000 Ethernet Driver main header file */ diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 9195884096f8..163e5838f7c2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. 
*/ #include "mbx.h" diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h index 479b062fe9ee..e5b31818d565 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.h +++ b/drivers/net/ethernet/intel/igbvf/mbx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 1999 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _E1000_MBX_H_ #define _E1000_MBX_H_ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index e2b7502f1953..f818f060e5a7 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h index 614e52409f11..625a309a3355 100644 --- a/drivers/net/ethernet/intel/igbvf/regs.h +++ b/drivers/net/ethernet/intel/igbvf/regs.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. 
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index bfe8d8297b2e..b8ba3f94c363 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,29 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #include "vf.h" diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 193b50026246..c71b0d7dbcee 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel(R) 82576 Virtual Function Linux driver - Copyright(c) 2009 - 2012 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. 
- - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2009 - 2018 Intel Corporation. */ #ifndef _E1000_VF_H_ #define _E1000_VF_H_ diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile index 1b42dd554dd2..2433e9300a33 100644 --- a/drivers/net/ethernet/intel/ixgb/Makefile +++ b/drivers/net/ethernet/intel/ixgb/Makefile @@ -1,33 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel PRO/10GbE Linux driver # Copyright(c) 1999 - 2008 Intel Corporation. # -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS <linux.nics@intel.com> -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - -# # Makefile for the Intel(R) PRO/10GbE ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index 92022841755f..e85271b68410 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ #ifndef _IXGB_H_ #define _IXGB_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c index eca216b9b859..129286fc1634 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h index 475297a810fe..3ee0a09e5d0a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ #ifndef _IXGB_EE_H_ #define _IXGB_EE_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index d10a0d242dda..43744bf0fc1c 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ /* ethtool support for ixgb */ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c index bf9a220f71fb..cbaa933ef30d 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ /* ixgb_hw.c * Shared functions for accessing and configuring the adapter diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h index 19f36d87ef61..6064583095da 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #ifndef _IXGB_HW_H_ #define _IXGB_HW_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h index 24e849902d60..9695b8215f01 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ #ifndef _IXGB_IDS_H_ #define _IXGB_IDS_H_ diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 2353c383f0a7..62f2173bc20e 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h index b1710379192e..7bd54efa698d 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2008 Intel Corporation. 
*/ /* glue for the OS independent part of ixgb * includes register access macros diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c index 04a60640ddda..f0cadd532c53 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel PRO/10GbE Linux driver - Copyright(c) 1999 - 2008 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2008 Intel Corporation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 4cd96c88cb5d..5414685189ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -1,32 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 10 Gigabit PCI Express Linux driver -# Copyright(c) 1999 - 2013 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# Linux NICS <linux.nics@intel.com> -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. 
# # Makefile for the Intel(R) 10GbE PCI Express ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4f08c712e58e..fc534e91c6b2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_H_ #define _IXGBE_H_ @@ -241,8 +215,7 @@ struct ixgbe_tx_buffer { unsigned long time_stamp; union { struct sk_buff *skb; - /* XDP uses address ptr on irq_clean */ - void *data; + struct xdp_frame *xdpf; }; unsigned int bytecount; unsigned short gso_segs; @@ -306,7 +279,6 @@ enum ixgbe_ring_state_t { struct ixgbe_fwd_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct net_device *netdev; - struct ixgbe_adapter *real_adapter; unsigned int tx_base_queue; unsigned int rx_base_queue; int pool; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index cb0fe5fedb33..eee277c1bedf 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 66a74f4651e8..1e49716f52bc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -1462,7 +1436,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; - u32 bucket_hash = 0, hi_dword = 0; + u32 bucket_hash = 0; + __be32 hi_dword = 0; int i; /* Apply masks to input data */ @@ -1501,7 +1476,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, * Limit hash to 13 bits since max bucket count is 8K. * Store result at the end of the input stream. 
*/ - input->formatted.bkt_hash = bucket_hash & 0x1FFF; + input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); } /** @@ -1610,7 +1585,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return IXGBE_ERR_CONFIG; } - switch (input_mask->formatted.flex_bytes & 0xFFFF) { + switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; @@ -1680,13 +1655,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); /* record vlan (little-endian) and flex_bytes(big-endian) */ - fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; fdirvlan |= ntohs(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; + fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); @@ -1724,7 +1699,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, s32 err; /* configure FDIRHASH register */ - fdirhash = input->formatted.bkt_hash; + fdirhash = (__force u32)input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 633be93f3dbb..3f5c350716bb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> @@ -3652,7 +3626,7 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, (__force u32)cpu_to_le32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. 
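/*
 * The recurring (__force u16)/(__force u32) casts in these hunks are
 * sparse annotations, not behavioral changes: __be16, __be32 and __le32
 * are bitwise types, and __force marks the places where reinterpreting
 * the raw bits without a byte swap is intentional (for example, feeding
 * a big-endian field into a hash or a register write). A minimal sketch
 * of the idiom the casts rely on:
 */
#include <linux/types.h>

/* Without __force, sparse (make C=1) warns about casting away the
 * bitwise __be32 type; the bit pattern is deliberately left unchanged. */
static inline u32 be32_bits(__be32 v)
{
	return (__force u32)v;
}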
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2b311382167a..4b531e8ae38a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_COMMON_H_ #define _IXGBE_COMMON_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index aaea8282bfd2..d26cea5b43bd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,31 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ - +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 73b6362d4327..60cd5863bf5e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_CONFIG_H_ #define _DCB_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 085130626330..379ae747cdce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -1,31 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index 7edce607f901..fdca41abb44c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_82598_CONFIG_H_ #define _DCB_82598_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 1eed6811e914..7948849840a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index fa030f0abc18..c6f084883cab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _DCB_82599_CONFIG_H_ #define _DCB_82599_CONFIG_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index b33f3f87e4b1..c00332d2e02a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe.h" #include <linux/dcbnl.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c index ad54080488ee..50dfb02fa34c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -1,30 +1,6 @@ -/******************************************************************************* +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ #include <linux/debugfs.h> #include <linux/module.h> @@ -34,15 +10,9 @@ static struct dentry *ixgbe_dbg_root; static char ixgbe_dbg_reg_ops_buf[256] = ""; -/** - * ixgbe_dbg_reg_ops_read - read for reg_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) +static ssize_t ixgbe_dbg_common_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos, + char *dbg_buf) { struct ixgbe_adapter *adapter = filp->private_data; char *buf; @@ -53,8 +23,7 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, return 0; buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_reg_ops_buf); + adapter->netdev->name, dbg_buf); if (!buf) return -ENOMEM; @@ -70,6 +39,20 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, } /** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_reg_ops_buf); +} + +/** * ixgbe_dbg_reg_ops_write - write into reg_ops datum * @filp: the opened file * @buffer: where to find the user's data @@ -145,33 +128,11 @@ static char ixgbe_dbg_netdev_ops_buf[256] = ""; * @count: the size of the user's buffer * @ppos: file position offset **/ -static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { - 
struct ixgbe_adapter *adapter = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", - adapter->netdev->name, - ixgbe_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; + return ixgbe_dbg_common_ops_read(filp, buffer, count, ppos, + ixgbe_dbg_netdev_ops_buf); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index c0e6ab42e0e1..bdd179c29ea4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for ixgbe */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 7a09a40e4472..94b3165ff543 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
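/*
 * The debugfs refactor above folds two nearly identical read() handlers
 * into ixgbe_dbg_common_ops_read(), parameterized only by the backing
 * buffer. The body is the stock debugfs read pattern: refuse partial
 * reads via *ppos, kasprintf() the reply, and let
 * simple_read_from_buffer() handle the user copy. A minimal sketch:
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t dbg_read(struct file *filp, char __user *to, size_t count,
			loff_t *ppos, const char *dbg_buf)
{
	char *buf;
	int len;

	if (*ppos != 0)		/* don't allow partial reads */
		return 0;

	buf = kasprintf(GFP_KERNEL, "example: %s\n", dbg_buf);
	if (!buf)
		return -ENOMEM;

	if (count < strlen(buf)) {
		kfree(buf);
		return -ENOSPC;
	}

	len = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
	kfree(buf);
	return len;
}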
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include <linux/if_ether.h> @@ -465,7 +440,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); - ddp->err = ddp_err; + ddp->err = (__force u32)ddp_err; ddp->sgl = NULL; ddp->sgc = 0; /* fall through */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index cf1919901514..724f5382329f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_FCOE_H #define _IXGBE_FCOE_H diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index cead23e3db0c..344a1f213a5f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -1,29 +1,5 @@ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */ #include "ixgbe.h" #include <net/xfrm.h> @@ -43,8 +19,9 @@ static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx, int i; for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), + (__force u32)cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_FLUSH(hw); reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX); @@ -93,7 +70,8 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, int i; /* store the SPI (in bigendian) and IPidx */ - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, + (__force u32)cpu_to_le32((__force u32)spi)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx); IXGBE_WRITE_FLUSH(hw); @@ -101,8 +79,9 @@ static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi, /* store the key, salt, and mode */ for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i])); - IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt)); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), + (__force u32)cpu_to_be32(key[3 - i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt)); IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode); IXGBE_WRITE_FLUSH(hw); @@ -121,7 +100,8 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) /* store the ip address */ for (i = 0; i < 4; i++) - IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i])); + IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), + (__force u32)cpu_to_le32((__force u32)addr[i])); IXGBE_WRITE_FLUSH(hw); ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl); @@ -391,7 +371,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, struct xfrm_state *ret = NULL; rcu_read_lock(); - hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi) + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, + (__force u32)spi) { if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, @@ -401,6 +382,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, xfrm_state_hold(ret); break; } + } rcu_read_unlock(); return ret; } @@ -463,6 +445,89 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, } /** + * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters + * @xs: pointer to transformer state struct + **/ +static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + u32 mfval, manc, reg; + int num_filters = 4; + bool manc_ipv4; + u32 bmcipval; + int i, j; + +#define MANC_EN_IPV4_FILTER BIT(24) +#define MFVAL_IPV4_FILTER_SHIFT 16 +#define MFVAL_IPV6_FILTER_SHIFT 24 +#define MIPAF_ARR(_m, _n) (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4)) + +#define IXGBE_BMCIP(_n) (0x5050 + ((_n) * 4)) +#define IXGBE_BMCIPVAL 0x5060 +#define BMCIP_V4 0x2 +#define BMCIP_V6 0x3 +#define BMCIP_MASK 0x3 + + manc = IXGBE_READ_REG(hw, IXGBE_MANC); + manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); + mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); + bmcipval = 
IXGBE_READ_REG(hw, IXGBE_BMCIPVAL); + + if (xs->props.family == AF_INET) { + /* are there any IPv4 filters to check? */ + if (manc_ipv4) { + /* the 4 ipv4 filters are all in MIPAF(3, i) */ + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i))) + continue; + + reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i)); + if (reg == xs->id.daddr.a4) + return 1; + } + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V4) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3)); + if (reg == xs->id.daddr.a4) + return 1; + } + + } else { + /* if there are ipv4 filters, they are in the last ipv6 slot */ + if (manc_ipv4) + num_filters = 3; + + for (i = 0; i < num_filters; i++) { + if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i))) + continue; + + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? */ + return 1; + } + + if ((bmcipval & BMCIP_MASK) == BMCIP_V6) { + for (j = 0; j < 4; j++) { + reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j)); + if (reg != xs->id.daddr.a6[j]) + break; + } + if (j == 4) /* did we match all 4 words? */ + return 1; + } + } + + return 0; +} + +/** * ixgbe_ipsec_add_sa - program device with a security association * @xs: pointer to transformer state struct **/ @@ -483,6 +548,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + if (ixgbe_ipsec_check_mgmt_ip(xs)) { + netdev_err(dev, "IPsec IP addr clash with mgmt filters\n"); + return -EINVAL; + } + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa rsa; @@ -593,7 +663,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) /* hash the new entry for faster search in Rx path */ hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, - rsa.xs->id.spi); + (__force u32)rsa.xs->id.spi); } else { struct tx_sa tsa; @@ -677,7 +747,8 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) if (!ipsec->ip_tbl[ipi].ref_cnt) { memset(&ipsec->ip_tbl[ipi], 0, sizeof(struct rx_ip_sa)); - ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf); + ixgbe_ipsec_set_rx_ip(hw, ipi, + (__force __be32 *)zerobuf); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index 4f099f516645..9ef7faadda69 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -1,30 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program. If not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
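/*
 * The j == 4 test in ixgbe_ipsec_check_mgmt_ip() above is the usual
 * "loop ran to completion" idiom: an IPv6 address spans four 32-bit
 * registers, so a management-filter clash only counts if all four words
 * compared equal. A minimal stand-alone sketch of the pattern (the
 * read_word() callback is hypothetical):
 */
#include <linux/types.h>

static bool ipv6_words_match(u32 (*read_word)(int idx),
			     const __be32 addr[4])
{
	int j;

	for (j = 0; j < 4; j++)
		if (read_word(j) != (__force u32)addr[j])
			break;

	return j == 4;	/* no early break: all four words matched */
}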
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */ #ifndef _IXGBE_IPSEC_H_ #define _IXGBE_IPSEC_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index ed4cbe94c355..893a9206e718 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_sriov.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2ecd55856c50..38b4e4899490 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include <linux/types.h> #include <linux/module.h> @@ -752,8 +727,8 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) ring_desc = ""; pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p%s", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), (u64)dma_unmap_addr(tx_buffer, dma), dma_unmap_len(tx_buffer, len), tx_buffer->next_to_watch, @@ -864,15 +839,15 @@ rx_ring_summary: /* Descriptor Done */ pr_info("RWB[0x%03X] %016llX %016llX ---------------- %p%s\n", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), rx_buffer_info->skb, ring_desc); } else { pr_info("R [0x%03X] %016llX %016llX %016llX %p%s\n", i, - le64_to_cpu(u0->a), - le64_to_cpu(u0->b), + le64_to_cpu((__force __le64)u0->a), + le64_to_cpu((__force __le64)u0->b), (u64)rx_buffer_info->dma, rx_buffer_info->skb, ring_desc); @@ -1216,7 +1191,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, /* free the skb */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buffer->data); + xdp_return_frame(tx_buffer->xdpf); else napi_consume_skb(tx_buffer->skb, napi_budget); @@ -1768,15 +1743,14 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) ixgbe_ipsec_rx(rx_ring, rx_desc, skb); - skb->protocol = eth_type_trans(skb, dev); - /* record Rx queue, or update MACVLAN statistics */ if (netif_is_ixgbe(dev)) skb_record_rx_queue(skb, rx_ring->queue_index); else macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, true, - (skb->pkt_type == PACKET_BROADCAST) || - (skb->pkt_type == PACKET_MULTICAST)); + false); + + skb->protocol = eth_type_trans(skb, dev); } static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, @@ -2262,7 +2236,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, #define IXGBE_XDP_TX 2 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_buff *xdp); + struct xdp_frame *xdpf); static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, @@ -2270,6 +2244,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; u32 act; rcu_read_lock(); @@ -2278,12 +2253,19 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, if (!xdp_prog) goto xdp_out; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: break; case XDP_TX: - result = ixgbe_xmit_xdp_ring(adapter, xdp); + xdpf = convert_to_xdp_frame(xdp); + if (unlikely(!xdpf)) { + result = IXGBE_XDP_CONSUMED; + break; + } + result = ixgbe_xmit_xdp_ring(adapter, xdpf); break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); @@ -4211,7 +4193,8 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 reg_offset, vf_shift; + u16 pool = adapter->num_rx_pools; + u32 reg_offset, vf_shift, vmolr; u32 gcr_ext, vmdctl; int i; @@ -4225,6 +4208,13 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) vmdctl |= IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); + /* accept untagged packets until a vlan tag is + * specifically set for the VMDQ queue/pool + */ + vmolr = IXGBE_VMOLR_AUPE; + while (pool--) + 
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr); + vf_shift = VMDQ_P(0) % 32; reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; @@ -4892,36 +4882,6 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, return -ENOMEM; } -/** - * ixgbe_write_uc_addr_list - write unicast addresses to RAR table - * @netdev: network interface device structure - * @vfn: pool to associate with unicast addresses - * - * Writes unicast address list to the RAR table. - * Returns: -ENOMEM on failure/insufficient address space - * 0 on no addresses written - * X on writing X addresses to the RAR table - **/ -static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int count = 0; - - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) - return -ENOMEM; - - if (!netdev_uc_empty(netdev)) { - struct netdev_hw_addr *ha; - netdev_for_each_uc_addr(ha, netdev) { - ixgbe_del_mac_filter(adapter, ha->addr, vfn); - ixgbe_add_mac_filter(adapter, ha->addr, vfn); - count++; - } - } - return count; -} - static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -5301,29 +5261,6 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) spin_unlock(&adapter->fdir_perfect_lock); } -static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, - struct ixgbe_adapter *adapter) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 vmolr; - - /* No unicast promiscuous support for VMDQ devices. */ - vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); - vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); - - /* clear the affected bit */ - vmolr &= ~IXGBE_VMOLR_MPE; - - if (dev->flags & IFF_ALLMULTI) { - vmolr |= IXGBE_VMOLR_MPE; - } else { - vmolr |= IXGBE_VMOLR_ROMPE; - hw->mac.ops.update_mc_addr_list(hw, dev); - } - ixgbe_write_uc_addr_list(adapter->netdev, pool); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); -} - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from @@ -5376,21 +5313,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) rx_ring->next_to_use = 0; } -static int ixgbe_fwd_ring_up(struct net_device *vdev, +static int ixgbe_fwd_ring_up(struct ixgbe_adapter *adapter, struct ixgbe_fwd_adapter *accel) { - struct ixgbe_adapter *adapter = accel->real_adapter; + struct net_device *vdev = accel->netdev; int i, baseq, err; - if (!test_bit(accel->pool, adapter->fwd_bitmask)) - return 0; - baseq = accel->pool * adapter->num_rx_queues_per_pool; netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", accel->pool, adapter->num_rx_pools, baseq, baseq + adapter->num_rx_queues_per_pool); - accel->netdev = vdev; accel->rx_base_queue = baseq; accel->tx_base_queue = baseq; @@ -5407,26 +5340,36 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, */ err = ixgbe_add_mac_filter(adapter, vdev->dev_addr, VMDQ_P(accel->pool)); - if (err >= 0) { - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + if (err >= 0) return 0; - } + + /* if we cannot add the MAC rule then disable the offload */ + macvlan_release_l2fw_offload(vdev); for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->rx_ring[baseq + i]->netdev = NULL; + netdev_err(vdev, "L2FW offload disabled due to L2 filter error\n"); + + clear_bit(accel->pool, adapter->fwd_bitmask); + kfree(accel); + return err; } -static int ixgbe_upper_dev_walk(struct net_device *upper, void 
*data) +static int ixgbe_macvlan_up(struct net_device *vdev, void *data) { - if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + struct ixgbe_adapter *adapter = data; + struct ixgbe_fwd_adapter *accel; - if (dfwd->fwd_priv) - ixgbe_fwd_ring_up(upper, vadapter); - } + if (!netif_is_macvlan(vdev)) + return 0; + + accel = macvlan_accel_priv(vdev); + if (!accel) + return 0; + + ixgbe_fwd_ring_up(adapter, accel); return 0; } @@ -5434,7 +5377,7 @@ static int ixgbe_upper_dev_walk(struct net_device *upper, void *data) static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) { netdev_walk_all_upper_dev_rcu(adapter->netdev, - ixgbe_upper_dev_walk, NULL); + ixgbe_macvlan_up, adapter); } static void ixgbe_configure(struct ixgbe_adapter *adapter) @@ -5797,7 +5740,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) /* Free all the Tx ring sk_buffs */ if (ring_is_xdp(tx_ring)) - page_frag_free(tx_buffer->data); + xdp_return_frame(tx_buffer->xdpf); else dev_kfree_skb_any(tx_buffer->skb); @@ -6370,7 +6313,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); int ring_node = -1; - int size; + int size, err; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -6407,6 +6350,13 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->queue_index) < 0) goto err; + err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err) { + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + goto err; + } + rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -7671,17 +7621,19 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down, removing or resetting, just bail */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_REMOVING, &adapter->state) || - test_bit(__IXGBE_RESETTING, &adapter->state)) + test_bit(__IXGBE_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } ixgbe_dump(adapter); netdev_err(adapter->netdev, "Reset adapter\n"); adapter->tx_timeout_count++; - rtnl_lock(); ixgbe_reinit_locked(adapter); rtnl_unlock(); } @@ -7801,7 +7753,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* remove payload length from inner checksum */ paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; @@ -8336,7 +8288,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, - struct xdp_buff *xdp) + struct xdp_frame *xdpf) { struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; @@ -8345,12 +8297,12 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, dma_addr_t dma; u16 i; - len = xdp->data_end - xdp->data; + len = xdpf->len; if (unlikely(!ixgbe_desc_unused(ring))) return IXGBE_XDP_CONSUMED; - dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); + dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) return IXGBE_XDP_CONSUMED; @@ -8365,7 +8317,8 @@ static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, dma_unmap_len_set(tx_buffer, len, len); 
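/*
 * The XDP_TX path above now converts the xdp_buff (which borrows the Rx
 * page) into an xdp_frame before queueing it: convert_to_xdp_frame()
 * writes the frame metadata into the packet headroom (hence the new
 * prefetchw(xdp->data_hard_start) before running the program), and
 * storing the resulting pointer in tx_buffer->xdpf lets both Tx cleanup
 * paths free it generically with xdp_return_frame(). A minimal sketch,
 * where drv_xmit_xdp_frame() stands in for ixgbe_xmit_xdp_ring():
 */
#include <net/xdp.h>

int drv_xmit_xdp_frame(struct xdp_frame *xdpf);	/* hypothetical helper */

static int drv_handle_xdp_tx(struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);

	if (unlikely(!xdpf))
		return -ENOMEM;	/* headroom too small for the metadata */

	return drv_xmit_xdp_frame(xdpf);
}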
dma_unmap_addr_set(tx_buffer, dma, dma); - tx_buffer->data = xdp->data; + tx_buffer->xdpf = xdpf; + tx_desc->read.buffer_addr = cpu_to_le64(dma); /* put descriptor type bits */ @@ -8827,6 +8780,49 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) } #endif /* CONFIG_IXGBE_DCB */ +static int ixgbe_reassign_macvlan_pool(struct net_device *vdev, void *data) +{ + struct ixgbe_adapter *adapter = data; + struct ixgbe_fwd_adapter *accel; + int pool; + + /* we only care about macvlans... */ + if (!netif_is_macvlan(vdev)) + return 0; + + /* that have hardware offload enabled... */ + accel = macvlan_accel_priv(vdev); + if (!accel) + return 0; + + /* If we can relocate to a different bit do so */ + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + if (pool < adapter->num_rx_pools) { + set_bit(pool, adapter->fwd_bitmask); + accel->pool = pool; + return 0; + } + + /* if we cannot find a free pool then disable the offload */ + netdev_err(vdev, "L2FW offload disabled due to lack of queue resources\n"); + macvlan_release_l2fw_offload(vdev); + kfree(accel); + + return 0; +} + +static void ixgbe_defrag_macvlan_pools(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* flush any stale bits out of the fwd bitmask */ + bitmap_clear(adapter->fwd_bitmask, 1, 63); + + /* walk through upper devices reassigning pools */ + netdev_walk_all_upper_dev_rcu(dev, ixgbe_reassign_macvlan_pool, + adapter); +} + /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * @@ -8894,6 +8890,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) #endif /* CONFIG_IXGBE_DCB */ ixgbe_init_interrupt_scheme(adapter); + ixgbe_defrag_macvlan_pools(dev); + if (netif_running(dev)) return ixgbe_open(dev); @@ -8998,13 +8996,12 @@ struct upper_walk_data { static int get_macvlan_queue(struct net_device *upper, void *_data) { if (netif_is_macvlan(upper)) { - struct macvlan_dev *dfwd = netdev_priv(upper); - struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + struct ixgbe_fwd_adapter *vadapter = macvlan_accel_priv(upper); struct upper_walk_data *data = _data; struct ixgbe_adapter *adapter = data->adapter; int ifindex = data->ifindex; - if (vadapter && vadapter->netdev->ifindex == ifindex) { + if (vadapter && upper->ifindex == ifindex) { data->queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; data->action = data->queue; return 1; @@ -9108,7 +9105,8 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, for (j = 0; field_ptr[j].val; j++) { if (field_ptr[j].off == off) { - field_ptr[j].val(input, mask, val, m); + field_ptr[j].val(input, mask, (__force u32)val, + (__force u32)m); input->filter.formatted.flow_type |= field_ptr[j].type; found_entry = true; @@ -9117,8 +9115,10 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, } if (nexthdr) { if (nexthdr->off == cls->knode.sel->keys[i].off && - nexthdr->val == cls->knode.sel->keys[i].val && - nexthdr->mask == cls->knode.sel->keys[i].mask) + nexthdr->val == + (__force u32)cls->knode.sel->keys[i].val && + nexthdr->mask == + (__force u32)cls->knode.sel->keys[i].mask) found_jump_field = true; else continue; @@ -9222,7 +9222,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || - nexthdr[i].m != cls->knode.sel->offmask) + nexthdr[i].m != + (__force u32)cls->knode.sel->offmask) return err; jump = kzalloc(sizeof(*jump), 
GFP_KERNEL); @@ -9443,6 +9444,22 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, return features; } +static void ixgbe_reset_l2fw_offload(struct ixgbe_adapter *adapter) +{ + int rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + + /* go back to full RSS if we're not running SR-IOV */ + if (!adapter->ring_feature[RING_F_VMDQ].offset) + adapter->flags &= ~(IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED); + + adapter->ring_feature[RING_F_RSS].limit = rss; + adapter->ring_feature[RING_F_VMDQ].limit = 1; + + ixgbe_setup_tc(adapter->netdev, adapter->hw_tcs); +} + static int ixgbe_set_features(struct net_device *netdev, netdev_features_t features) { @@ -9523,7 +9540,9 @@ static int ixgbe_set_features(struct net_device *netdev, } } - if (need_reset) + if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) + ixgbe_reset_l2fw_offload(adapter); + else if (need_reset) ixgbe_do_reset(netdev); else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) @@ -9786,71 +9805,98 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) { - struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); - int used_pools = adapter->num_vfs + adapter->num_rx_pools; + struct ixgbe_fwd_adapter *accel; int tcs = adapter->hw_tcs ? : 1; - unsigned int limit; int pool, err; - /* Hardware has a limited number of available pools. Each VF, and the - * PF require a pool. Check to ensure we don't attempt to use more - * then the available number of pools. + /* The hardware supported by ixgbe only filters on the destination MAC + * address. In order to avoid issues we only support offloading modes + * where the hardware can actually provide the functionality. */ - if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) - return ERR_PTR(-EINVAL); + if (!macvlan_supports_dest_filter(vdev)) + return ERR_PTR(-EMEDIUMTYPE); + + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + if (pool == adapter->num_rx_pools) { + u16 used_pools = adapter->num_vfs + adapter->num_rx_pools; + u16 reserved_pools; + + if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || + adapter->num_rx_pools > IXGBE_MAX_MACVLANS) + return ERR_PTR(-EBUSY); + + /* Hardware has a limited number of available pools. Each VF, + * and the PF require a pool. Check to ensure we don't + * attempt to use more then the available number of pools. + */ + if (used_pools >= IXGBE_MAX_VF_FUNCTIONS) + return ERR_PTR(-EBUSY); - if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || - (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) - return ERR_PTR(-EBUSY); + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | + IXGBE_FLAG_SRIOV_ENABLED; - fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL); - if (!fwd_adapter) - return ERR_PTR(-ENOMEM); + /* Try to reserve as many queues per pool as possible, + * we start with the configurations that support 4 queues + * per pools, followed by 2, and then by just 1 per pool. 
+ */ + if (used_pools < 32 && adapter->num_rx_pools < 16) + reserved_pools = min_t(u16, + 32 - used_pools, + 16 - adapter->num_rx_pools); + else if (adapter->num_rx_pools < 32) + reserved_pools = min_t(u16, + 64 - used_pools, + 32 - adapter->num_rx_pools); + else + reserved_pools = 64 - used_pools; - pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - set_bit(pool, adapter->fwd_bitmask); - limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1); - /* Enable VMDq flag so device will be set in VM mode */ - adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; + if (!reserved_pools) + return ERR_PTR(-EBUSY); - fwd_adapter->pool = pool; - fwd_adapter->real_adapter = adapter; + adapter->ring_feature[RING_F_VMDQ].limit += reserved_pools; - /* Force reinit of ring allocation with VMDQ enabled */ - err = ixgbe_setup_tc(pdev, adapter->hw_tcs); + /* Force reinit of ring allocation with VMDQ enabled */ + err = ixgbe_setup_tc(pdev, adapter->hw_tcs); + if (err) + return ERR_PTR(err); - if (!err && netif_running(pdev)) - err = ixgbe_fwd_ring_up(vdev, fwd_adapter); + if (pool >= adapter->num_rx_pools) + return ERR_PTR(-ENOMEM); + } - if (!err) - return fwd_adapter; + accel = kzalloc(sizeof(*accel), GFP_KERNEL); + if (!accel) + return ERR_PTR(-ENOMEM); - /* unwind counter and free adapter struct */ - netdev_info(pdev, - "%s: dfwd hardware acceleration failed\n", vdev->name); - clear_bit(pool, adapter->fwd_bitmask); - kfree(fwd_adapter); - return ERR_PTR(err); + set_bit(pool, adapter->fwd_bitmask); + accel->pool = pool; + accel->netdev = vdev; + + if (!netif_running(pdev)) + return accel; + + err = ixgbe_fwd_ring_up(adapter, accel); + if (err) + return ERR_PTR(err); + + return accel; } static void ixgbe_fwd_del(struct net_device *pdev, void *priv) { struct ixgbe_fwd_adapter *accel = priv; - struct ixgbe_adapter *adapter = accel->real_adapter; + struct ixgbe_adapter *adapter = netdev_priv(pdev); unsigned int rxbase = accel->rx_base_queue; - unsigned int limit, i; + unsigned int i; /* delete unicast filter associated with offloaded interface */ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr, VMDQ_P(accel->pool)); - /* disable ability to receive packets for this pool */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0); - /* Allow remaining Rx packets to get flushed out of the * Rx FIFO before we drop the netdev for the ring. 
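[The ixgbe_fwd_add()/ixgbe_fwd_del() hunks above manage macvlan offload pools as bits in adapter->fwd_bitmask: a new offload claims the first clear bit (find_first_zero_bit/set_bit), and teardown simply clears it. A minimal standalone sketch of that allocation pattern, with a fixed-size uint64_t standing in for the kernel's bitmap API; all names here are illustrative, not the driver's:]

#include <stdint.h>

#define MAX_POOLS 64

/* Bit 0 is the PF's default pool and is always considered busy,
 * mirroring how pool 0 stays reserved in the driver. */
static uint64_t fwd_bitmask = 1ULL;

/* Claim the first free pool below num_pools, or return -1 so the
 * caller can try to grow the VMDq limit (as ixgbe_fwd_add() does
 * before retrying). */
static int pool_alloc(unsigned int num_pools)
{
	for (unsigned int p = 0; p < num_pools && p < MAX_POOLS; p++) {
		if (!(fwd_bitmask & (1ULL << p))) {
			fwd_bitmask |= 1ULL << p;
			return (int)p;
		}
	}
	return -1;
}

static void pool_free(unsigned int pool)
{
	fwd_bitmask &= ~(1ULL << pool);
}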
*/ @@ -9869,25 +9915,6 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) } clear_bit(accel->pool, adapter->fwd_bitmask); - limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools); - adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; - - /* go back to full RSS if we're done with our VMQs */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { - int rss = min_t(int, ixgbe_max_rss_indices(adapter), - num_online_cpus()); - - adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->ring_feature[RING_F_RSS].limit = rss; - } - - ixgbe_setup_tc(pdev, adapter->hw_tcs); - netdev_dbg(pdev, "pool %i:%i queues %i:%i\n", - accel->pool, adapter->num_rx_pools, - accel->rx_base_queue, - accel->rx_base_queue + - adapter->num_rx_queues_per_pool); kfree(accel); } @@ -9969,7 +9996,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) } } else { for (i = 0; i < adapter->num_rx_queues; i++) - xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); + (void)xchg(&adapter->rx_ring[i]->xdp_prog, + adapter->xdp_prog); } if (old_prog) @@ -9995,15 +10023,29 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } -static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) +{ + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + writel(ring->next_to_use, ring->tail); +} + +static int ixgbe_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_ring *ring; - int err; + int drops = 0; + int i; if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) return -ENETDOWN; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + /* During program transitions its possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ @@ -10011,35 +10053,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (unlikely(!ring)) return -ENXIO; - err = ixgbe_xmit_xdp_ring(adapter, xdp); - if (err != IXGBE_XDP_TX) - return -ENOSPC; - - return 0; -} - -static void ixgbe_xdp_flush(struct net_device *dev) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ring *ring; - - /* Its possible the device went down between xdp xmit and flush so - * we need to ensure device is still up. - */ - if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) - return; + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; - ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; - if (unlikely(!ring)) - return; + err = ixgbe_xmit_xdp_ring(adapter, xdpf); + if (err != IXGBE_XDP_TX) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } - /* Force memory writes to complete before letting h/w know there - * are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + if (unlikely(flags & XDP_XMIT_FLUSH)) + ixgbe_xdp_ring_update_tail(ring); - return; + return n - drops; } static const struct net_device_ops ixgbe_netdev_ops = { @@ -10089,7 +10117,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, - .ndo_xdp_flush = ixgbe_xdp_flush, }; /** @@ -10908,14 +10935,14 @@ skip_bad_vf_detection: rtnl_lock(); netif_device_detach(netdev); + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); + if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } - if (netif_running(netdev)) - ixgbe_close_suspend(adapter); - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index a0cb84381cd0..5679293e53f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index c4628b663590..e085b6520dac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. 
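[The ixgbe_xdp_xmit() rewrite above replaces the one-frame-per-call ndo plus a separate .ndo_xdp_flush with a batched transmit: queue up to n frames, free whatever the ring rejects, ring the doorbell once if XDP_XMIT_FLUSH is set, and return the number of frames accepted. A standalone sketch of that contract; ring_tx(), ring_kick() and frame_free() are hypothetical stand-ins for the driver internals:]

struct frame;
struct ring;

int ring_tx(struct ring *r, struct frame *f);	/* 0 on success */
void ring_kick(struct ring *r);			/* barrier + tail write */
void frame_free(struct frame *f);

/* Returns how many of the n frames were accepted; rejected frames
 * are freed here rather than handed back to the caller. */
static int xdp_xmit_batch(struct ring *r, int n, struct frame **frames,
			  int flush)
{
	int drops = 0;

	for (int i = 0; i < n; i++) {
		if (ring_tx(r, frames[i]) != 0) {
			frame_free(frames[i]);
			drops++;
		}
	}

	if (flush)
		ring_kick(r);	/* one doorbell for the whole batch */

	return n - drops;
}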
- - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 72446644f9fa..1e6cf220f543 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux drive - * Copyright(c) 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #ifndef _IXGBE_MODEL_H_ #define _IXGBE_MODEL_H_ @@ -53,8 +29,8 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_ip[0] = val; - mask->formatted.src_ip[0] = m; + input->filter.formatted.src_ip[0] = (__force __be32)val; + mask->formatted.src_ip[0] = (__force __be32)m; return 0; } @@ -62,8 +38,8 @@ static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.dst_ip[0] = val; - mask->formatted.dst_ip[0] = m; + input->filter.formatted.dst_ip[0] = (__force __be32)val; + mask->formatted.dst_ip[0] = (__force __be32)m; return 0; } @@ -79,10 +55,10 @@ static inline int ixgbe_mat_prgm_ports(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) { - input->filter.formatted.src_port = val & 0xffff; - mask->formatted.src_port = m & 0xffff; - input->filter.formatted.dst_port = val >> 16; - mask->formatted.dst_port = m >> 16; + input->filter.formatted.src_port = (__force __be16)(val & 0xffff); + mask->formatted.src_port = (__force __be16)(m & 0xffff); + input->filter.formatted.dst_port = (__force __be16)(val >> 16); + mask->formatted.dst_port = (__force __be16)(m >> 16); return 0; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 91bde90f9265..919a7af84b42 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index d6a7e77348c5..64e44e01c973 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. 
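[The ixgbe_model.h hunk above is purely about sparse's endianness checking: the u32 values coming from the TC u32 classifier already hold network-byte-order bits, so the (__force __be32)/(__force __be16) casts change only the annotation, never the data. A compressed illustration of how those annotations behave; this mirrors the kernel's __CHECKER__ definitions, but the struct and function are made up for the example:]

#include <stdint.h>

#ifdef __CHECKER__			/* defined when sparse runs */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef uint32_t __bitwise be32;	/* distinct type under sparse */

struct atr_input {
	be32 src_ip;
};

static void prgm_sip(struct atr_input *input, uint32_t val)
{
	/* val already carries big-endian bits from the classifier key;
	 * without __force, sparse would warn about mixing be32 with a
	 * plain u32. The cast is a no-op at runtime. */
	input->src_ip = (__force be32)val;
}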
- - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_PHY_H_ #define _IXGBE_PHY_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index f6cc9166082a..b3e0d8bb5cbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,30 +1,6 @@ -/******************************************************************************* +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ #include "ixgbe.h" #include <linux/ptp_classify.h> #include <linux/clocksource.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 008aa073a679..6f59933cdff7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/types.h> #include <linux/module.h> @@ -266,7 +241,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) #endif /* Disable VMDq flag so device will be set in VM mode */ - if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + if (bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools) == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; rss = min_t(int, ixgbe_max_rss_indices(adapter), @@ -312,7 +287,8 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) * other values out of range. */ num_tc = adapter->hw_tcs; - num_rx_pools = adapter->num_rx_pools; + num_rx_pools = bitmap_weight(adapter->fwd_bitmask, + adapter->num_rx_pools); limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : (num_tc > 1) ? IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; @@ -878,14 +854,11 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* reply to reset with ack and vf mac address */ msgbuf[0] = IXGBE_VF_RESET; - if (!is_zero_ether_addr(vf_mac)) { + if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; memcpy(addr, vf_mac, ETH_ALEN); } else { msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; - dev_warn(&adapter->pdev->dev, - "VF %d has no MAC address assigned, you may have to assign one manually\n", - vf); } /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index e30d1f07e891..3ec21923c89c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
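[The ixgbe_sriov.c hunks above stop trusting the cached RING_F_VMDQ limit and instead count how many pools are actually live via bitmap_weight(adapter->fwd_bitmask, adapter->num_rx_pools). For a word-sized bitmap that is just a masked popcount, e.g.:]

#include <stdint.h>

/* Count set bits among the low nbits of a 64-bit bitmap -- the same
 * question bitmap_weight() answers for arbitrary-length bitmaps. */
static unsigned int pools_in_use(uint64_t bitmap, unsigned int nbits)
{
	uint64_t mask = (nbits >= 64) ? ~0ULL : ((1ULL << nbits) - 1);

	return (unsigned int)__builtin_popcountll(bitmap & mask);
}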
- - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_SRIOV_H_ #define _IXGBE_SRIOV_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 24766e125592..204844288c16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "ixgbe.h" #include "ixgbe_common.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 2daa81e6e9b2..e8ed37749ab1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,31 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_TYPE_H_ #define _IXGBE_TYPE_H_ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index b8c5fd2a2115..de563cfd294d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,30 +1,5 @@ -/******************************************************************************* - - Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2016 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - Linux NICS <linux.nics@intel.com> - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include <linux/pci.h> #include <linux/delay.h> diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index 182d640e9f7a..e246c0d2a427 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -1,27 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - *****************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. 
*/ #include "ixgbe_type.h" diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9592f3e3e42e..a8148c7126e5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,26 +1,6 @@ -/******************************************************************************* - * - * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2016 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * Linux NICS <linux.nics@intel.com> - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - ******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ + #include "ixgbe_x540.h" #include "ixgbe_type.h" #include "ixgbe_common.h" @@ -898,8 +878,9 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ - buffer.address = cpu_to_be32((offset + current_word) * 2); - buffer.length = cpu_to_be16(words_to_read * 2); + buffer.address = (__force u32)cpu_to_be32((offset + + current_word) * 2); + buffer.length = (__force u16)cpu_to_be16(words_to_read * 2); buffer.pad2 = 0; buffer.pad3 = 0; @@ -1109,9 +1090,9 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ - buffer.address = cpu_to_be32(offset * 2); + buffer.address = (__force u32)cpu_to_be32(offset * 2); /* one word */ - buffer.length = cpu_to_be16(sizeof(u16)); + buffer.length = (__force u16)cpu_to_be16(sizeof(u16)); status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index bb47814cfa90..aba1e6a37a6a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -1,31 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -################################################################################ -# -# Intel 82599 Virtual Function driver -# Copyright(c) 1999 - 2012 Intel Corporation. -# -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. 
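[The remaining hunks in this section are mechanical license-header conversions: each multi-line GPL notice collapses to one SPDX identifier plus a copyright line. The comment style follows the convention visible throughout this diff -- C++ comments in .c files, C comments in headers, and # comments in Makefiles:]

First lines of a converted .c file:

	// SPDX-License-Identifier: GPL-2.0
	/* Copyright(c) 1999 - 2018 Intel Corporation. */

First line of a converted header:

	/* SPDX-License-Identifier: GPL-2.0 */

First line of a converted Makefile:

	# SPDX-License-Identifier: GPL-2.0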
-# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., -# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. -# -# The full GNU General Public License is included in this distribution in -# the file called "COPYING". -# -# Contact Information: -# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> -# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 -# -################################################################################ - +# Copyright(c) 1999 - 2018 Intel Corporation. # # Makefile for the Intel(R) 82599 VF ethernet driver # diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 71c828842b11..700d8eb2f6f8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_DEFINES_H_ #define _IXGBEVF_DEFINES_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 8e7d6c6f5c92..e7813d76527c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /* ethtool support for ixgbevf */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 447ce1d5e0e3..56a1031dcc07 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_H_ #define _IXGBEVF_H_ @@ -100,6 +76,7 @@ enum ixgbevf_ring_state_t { __IXGBEVF_TX_DETECT_HANG, __IXGBEVF_HANG_CHECK_ARMED, __IXGBEVF_TX_XDP_RING, + __IXGBEVF_TX_XDP_RING_PRIMED, }; #define ring_is_xdp(ring) \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 850f8af95e49..59416eddd840 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2018 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ /****************************************************************************** Copyright (c)2006 - 2007 Myricom, Inc. 
for some LRO specific code @@ -1014,24 +991,45 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, return IXGBEVF_XDP_CONSUMED; /* record the location of the first descriptor for this packet */ - tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; - tx_buffer->bytecount = len; - tx_buffer->gso_segs = 1; - tx_buffer->protocol = 0; - i = ring->next_to_use; - tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_buffer = &ring->tx_buffer_info[i]; dma_unmap_len_set(tx_buffer, len, len); dma_unmap_addr_set(tx_buffer, dma, dma); tx_buffer->data = xdp->data; - tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_buffer->bytecount = len; + tx_buffer->gso_segs = 1; + tx_buffer->protocol = 0; + + /* Populate minimal context descriptor that will provide for the + * fact that we are expected to process Ethernet frames. + */ + if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { + struct ixgbe_adv_tx_context_desc *context_desc; + + set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); + + context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); + context_desc->vlan_macip_lens = + cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = + cpu_to_le32(IXGBE_TXD_CMD_DEXT | + IXGBE_ADVTXD_DTYP_CTXT); + context_desc->mss_l4len_idx = 0; + + i = 1; + } /* put descriptor type bits */ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DCMD_IFCS; cmd_type |= len | IXGBE_TXD_CMD; + + tx_desc = IXGBEVF_TX_DESC(ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(dma); + tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); tx_desc->read.olinfo_status = cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) | @@ -1711,6 +1709,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, sizeof(struct ixgbevf_tx_buffer) * ring->count); clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); + clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl); @@ -3142,15 +3141,17 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; + rtnl_lock(); /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_REMOVING, &adapter->state) || - test_bit(__IXGBEVF_RESETTING, &adapter->state)) + test_bit(__IXGBEVF_RESETTING, &adapter->state)) { + rtnl_unlock(); return; + } adapter->tx_timeout_count++; - rtnl_lock(); ixgbevf_reinit_locked(adapter); rtnl_unlock(); } @@ -4187,6 +4188,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) return -EPERM; ether_addr_copy(hw->mac.addr, addr->sa_data); + ether_addr_copy(hw->mac.perm_addr, addr->sa_data); ether_addr_copy(netdev->dev_addr, addr->sa_data); return 0; @@ -4770,14 +4772,14 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, rtnl_lock(); netif_device_detach(netdev); + if (netif_running(netdev)) + ixgbevf_close_suspend(adapter); + if (state == pci_channel_io_perm_failure) { rtnl_unlock(); return PCI_ERS_RESULT_DISCONNECT; } - if (netif_running(netdev)) - ixgbevf_close_suspend(adapter); - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) pci_disable_device(pdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 2819abc454c7..6bc1953263b9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -1,28 +1,5 @@ 
-/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "mbx.h" #include "ixgbevf.h" diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index 5ec947fe3d09..bfd9ae150808 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h index 278f73980501..68d16ae5b65a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/regs.h +++ b/drivers/net/ethernet/intel/ixgbevf/regs.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. 
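[The ixgbevf XDP hunk above (the __IXGBEVF_TX_XDP_RING_PRIMED bit) is a one-shot initialization guard: the first XDP transmit on a ring writes a minimal context descriptor, and ixgbevf_configure_tx_ring() clears the bit so the next transmit primes the ring again. The shape of that pattern, sketched with plain flags instead of the kernel's atomic bitops; names are invented for the example:]

enum { XDP_RING_PRIMED = 1u << 0 };

struct tx_ring {
	unsigned int state;
	/* descriptors, tail pointer, ... */
};

static void xdp_xmit_one(struct tx_ring *ring)
{
	if (!(ring->state & XDP_RING_PRIMED)) {
		ring->state |= XDP_RING_PRIMED;
		/* emit the one-time context descriptor here */
	}
	/* then emit the data descriptor as usual */
}

static void ring_configure(struct tx_ring *ring)
{
	ring->state &= ~XDP_RING_PRIMED;	/* force re-priming after reset */
}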
- - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef _IXGBEVF_REGS_H_ #define _IXGBEVF_REGS_H_ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 38d3a327c1bc..bf0577e819e1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -1,28 +1,5 @@ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". - - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #include "vf.h" #include "ixgbevf.h" diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 194fbdaa4519..d1e9e306653b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -1,29 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/******************************************************************************* - - Intel 82599 Virtual Function driver - Copyright(c) 1999 - 2015 Intel Corporation. - - This program is free software; you can redistribute it and/or modify it - under the terms and conditions of the GNU General Public License, - version 2, as published by the Free Software Foundation. - - This program is distributed in the hope it will be useful, but WITHOUT - ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - more details. - - You should have received a copy of the GNU General Public License along with - this program; if not, see <http://www.gnu.org/licenses/>. - - The full GNU General Public License is included in this distribution in - the file called "COPYING". 
- - Contact Information: - e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - -*******************************************************************************/ +/* Copyright(c) 1999 - 2018 Intel Corporation. */ #ifndef __IXGBE_VF_H__ #define __IXGBE_VF_H__ diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index ebe5c9148935..cc2f7701e71e 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -86,6 +86,7 @@ config MVPP2 depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA select MVMDIO + select PHYLINK ---help--- This driver supports the network interface units in the Marvell ARMADA 375, 7K and 8K SoCs. diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index 9498ed26dbe5..55d4d10aa7d3 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o obj-$(CONFIG_MVNETA) += mvneta.o -obj-$(CONFIG_MVPP2) += mvpp2.o +obj-$(CONFIG_MVPP2) += mvpp2/ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index 0495487f7b42..c5dac6bd2be4 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -348,10 +348,7 @@ static int orion_mdio_probe(struct platform_device *pdev) goto out_mdio; } - if (pdev->dev.of_node) - ret = of_mdiobus_register(bus, pdev->dev.of_node); - else - ret = mdiobus_register(bus); + ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); goto out_mdio; diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c deleted file mode 100644 index 6f410235987c..000000000000 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ /dev/null @@ -1,8956 +0,0 @@ -/* - * Driver for Marvell PPv2 network controller for Armada 375 SoC. - * - * Copyright (C) 2014 Marvell - * - * Marcin Wojtas <mw@semihalf.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. 
- */ - -#include <linux/acpi.h> -#include <linux/kernel.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <linux/skbuff.h> -#include <linux/inetdevice.h> -#include <linux/mbus.h> -#include <linux/module.h> -#include <linux/mfd/syscon.h> -#include <linux/interrupt.h> -#include <linux/cpumask.h> -#include <linux/of.h> -#include <linux/of_irq.h> -#include <linux/of_mdio.h> -#include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_device.h> -#include <linux/phy.h> -#include <linux/phy/phy.h> -#include <linux/clk.h> -#include <linux/hrtimer.h> -#include <linux/ktime.h> -#include <linux/regmap.h> -#include <uapi/linux/ppp_defs.h> -#include <net/ip.h> -#include <net/ipv6.h> -#include <net/tso.h> - -/* Fifo Registers */ -#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) -#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) -#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 -#define MVPP2_RX_FIFO_INIT_REG 0x64 -#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) -#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) - -/* RX DMA Top Registers */ -#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) -#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) -#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) -#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) -#define MVPP2_POOL_BUF_SIZE_OFFSET 5 -#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) -#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff -#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) -#define MVPP2_RXQ_POOL_SHORT_OFFS 20 -#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 -#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 -#define MVPP2_RXQ_POOL_LONG_OFFS 24 -#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 -#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 -#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 -#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 -#define MVPP2_RXQ_DISABLE_MASK BIT(31) - -/* Top Registers */ -#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) -#define MVPP2_DSA_EXTENDED BIT(5) - -/* Parser Registers */ -#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 -#define MVPP2_PRS_PORT_LU_MAX 0xf -#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) -#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) -#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) -#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) -#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) -#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) -#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) -#define MVPP2_PRS_TCAM_IDX_REG 0x1100 -#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) -#define MVPP2_PRS_TCAM_INV_MASK BIT(31) -#define MVPP2_PRS_SRAM_IDX_REG 0x1200 -#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) -#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 -#define MVPP2_PRS_TCAM_EN_MASK BIT(0) - -/* RSS Registers */ -#define MVPP22_RSS_INDEX 0x1500 -#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) -#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) -#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) -#define MVPP22_RSS_TABLE_ENTRY 0x1508 -#define MVPP22_RSS_TABLE 0x1510 -#define MVPP22_RSS_TABLE_POINTER(p) (p) -#define MVPP22_RSS_WIDTH 0x150c - -/* Classifier Registers */ -#define MVPP2_CLS_MODE_REG 0x1800 -#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) -#define MVPP2_CLS_PORT_WAY_REG 0x1810 -#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << 
(port)) -#define MVPP2_CLS_LKP_INDEX_REG 0x1814 -#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 -#define MVPP2_CLS_LKP_TBL_REG 0x1818 -#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff -#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) -#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 -#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 -#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 -#define MVPP2_CLS_FLOW_TBL2_REG 0x182c -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 -#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 -#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) -#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 -#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) - -/* Descriptor Manager Top Registers */ -#define MVPP2_RXQ_NUM_REG 0x2040 -#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 -#define MVPP22_DESC_ADDR_OFFS 8 -#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 -#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) -#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 -#define MVPP2_RXQ_NUM_NEW_OFFSET 16 -#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) -#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff -#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 -#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 -#define MVPP2_RXQ_THRESH_REG 0x204c -#define MVPP2_OCCUPIED_THRESH_OFFSET 0 -#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff -#define MVPP2_RXQ_INDEX_REG 0x2050 -#define MVPP2_TXQ_NUM_REG 0x2080 -#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 -#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 -#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_TXQ_THRESH_REG 0x2094 -#define MVPP2_TXQ_THRESH_OFFSET 16 -#define MVPP2_TXQ_THRESH_MASK 0x3fff -#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 -#define MVPP2_TXQ_INDEX_REG 0x2098 -#define MVPP2_TXQ_PREF_BUF_REG 0x209c -#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) -#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) -#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) -#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) -#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) -#define MVPP2_TXQ_PENDING_REG 0x20a0 -#define MVPP2_TXQ_PENDING_MASK 0x3fff -#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 -#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) -#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 -#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 -#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 -#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 -#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 -#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff -#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 -#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 -#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) -#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 -#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 -#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) -#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff -#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) - -/* MBUS bridge registers */ -#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) -#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) -#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) -#define MVPP2_BASE_ADDR_ENABLE 0x4060 - -/* AXI Bridge Registers */ -#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 -#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 -#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 -#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 -#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 -#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c -#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 -#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 -#define 
MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 -#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 -#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 -#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 - -/* Values for AXI Bridge registers */ -#define MVPP22_AXI_ATTR_CACHE_OFFS 0 -#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 - -#define MVPP22_AXI_CODE_CACHE_OFFS 0 -#define MVPP22_AXI_CODE_DOMAIN_OFFS 4 - -#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 -#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 -#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb - -#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 -#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 - -/* Interrupt Cause and Mask registers */ -#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) -#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 - -#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) -#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) - -#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 -#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 - -#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 -#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 - -#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) -#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) -#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) -#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) -#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 -#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 -#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) -#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) -#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) -#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) -#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) -#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) -#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc -#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff -#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 -#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) -#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 - -/* Buffer Manager registers */ -#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) -#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 -#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) -#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 -#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) -#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 -#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) -#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 -#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) -#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff -#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 -#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) -#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) -#define MVPP2_BM_START_MASK BIT(0) -#define MVPP2_BM_STOP_MASK BIT(1) -#define MVPP2_BM_STATE_MASK BIT(4) -#define MVPP2_BM_LOW_THRESH_OFFS 8 -#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 -#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_LOW_THRESH_OFFS) -#define MVPP2_BM_HIGH_THRESH_OFFS 16
-#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 -#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ - MVPP2_BM_HIGH_THRESH_OFFS) -#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) -#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) -#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) -#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) -#define MVPP2_BM_BPPE_FULL_MASK BIT(3) -#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) -#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) -#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) -#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 -#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 -#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 -#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) -#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) -#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) -#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) -#define MVPP2_BM_VIRT_RLS_REG 0x64c0 -#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 -#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 - -/* TX Scheduler registers */ -#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 -#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 -#define MVPP2_TXP_SCHED_ENQ_MASK 0xff -#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 -#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 -#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 -#define MVPP2_TXP_SCHED_MTU_REG 0x801c -#define MVPP2_TXP_MTU_MAX 0x7FFFF -#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 -#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 -#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff -#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) -#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff -#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 -#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) -#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff -#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) -#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff - -/* TX general registers */ -#define MVPP2_TX_SNOOP_REG 0x8800 -#define MVPP2_TX_PORT_FLUSH_REG 0x8810 -#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) - -/* LMS registers */ -#define MVPP2_SRC_ADDR_MIDDLE 0x24 -#define MVPP2_SRC_ADDR_HIGH 0x28 -#define MVPP2_PHY_AN_CFG0_REG 0x34 -#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) -#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c -#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 - -/* Per-port registers */ -#define MVPP2_GMAC_CTRL_0_REG 0x0 -#define MVPP2_GMAC_PORT_EN_MASK BIT(0) -#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) -#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 -#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc -#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) -#define MVPP2_GMAC_CTRL_1_REG 0x4 -#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) -#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) -#define MVPP2_GMAC_PCS_LB_EN_BIT 6 -#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) -#define MVPP2_GMAC_SA_LOW_OFFS 7 -#define MVPP2_GMAC_CTRL_2_REG 0x8 -#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) -#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) -#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) -#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) -#define MVPP2_GMAC_DISABLE_PADDING BIT(5) -#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) -#define 
MVPP2_GMAC_AUTONEG_CONFIG 0xc -#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) -#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) -#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) -#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) -#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) -#define MVPP2_GMAC_AN_SPEED_EN BIT(7) -#define MVPP2_GMAC_FC_ADV_EN BIT(9) -#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) -#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) -#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) -#define MVPP2_GMAC_STATUS0 0x10 -#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) -#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c -#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ - MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) -#define MVPP22_GMAC_INT_STAT 0x20 -#define MVPP22_GMAC_INT_STAT_LINK BIT(1) -#define MVPP22_GMAC_INT_MASK 0x24 -#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) -#define MVPP22_GMAC_CTRL_4_REG 0x90 -#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) -#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) -#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) -#define MVPP22_GMAC_INT_SUM_MASK 0xa4 -#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) - -/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, - * relative to port->base. - */ -#define MVPP22_XLG_CTRL0_REG 0x100 -#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) -#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) -#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) -#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) -#define MVPP22_XLG_CTRL1_REG 0x104 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 -#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff -#define MVPP22_XLG_STATUS 0x10c -#define MVPP22_XLG_STATUS_LINK_UP BIT(0) -#define MVPP22_XLG_INT_STAT 0x114 -#define MVPP22_XLG_INT_STAT_LINK BIT(1) -#define MVPP22_XLG_INT_MASK 0x118 -#define MVPP22_XLG_INT_MASK_LINK BIT(1) -#define MVPP22_XLG_CTRL3_REG 0x11c -#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) -#define MVPP22_XLG_EXT_INT_MASK 0x15c -#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) -#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) -#define MVPP22_XLG_CTRL4_REG 0x184 -#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) -#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) -#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) - -/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ -#define MVPP22_SMI_MISC_CFG_REG 0x1204 -#define MVPP22_SMI_POLLING_EN BIT(10) - -#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) - -#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff - -/* Descriptor ring Macros */ -#define MVPP2_QUEUE_NEXT_DESC(q, index) \ - (((index) < (q)->last_desc) ? ((index) + 1) : 0) - -/* XPCS registers. PPv2.2 only */ -#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) -#define MVPP22_MPCS_CTRL 0x14 -#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) -#define MVPP22_MPCS_CLK_RESET 0x14c -#define MAC_CLK_RESET_SD_TX BIT(0) -#define MAC_CLK_RESET_SD_RX BIT(1) -#define MAC_CLK_RESET_MAC BIT(2) -#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) -#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) - -/* XPCS registers. PPv2.2 only */ -#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) -#define MVPP22_XPCS_CFG0 0x0 -#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) -#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) - -/* System controller registers. 
Accessed through a regmap. */ -#define GENCONF_SOFT_RESET1 0x1108 -#define GENCONF_SOFT_RESET1_GOP BIT(6) -#define GENCONF_PORT_CTRL0 0x1110 -#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) -#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) -#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) -#define GENCONF_PORT_CTRL1 0x1114 -#define GENCONF_PORT_CTRL1_EN(p) BIT(p) -#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) -#define GENCONF_CTRL0 0x1120 -#define GENCONF_CTRL0_PORT0_RGMII BIT(0) -#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) -#define GENCONF_CTRL0_PORT1_RGMII BIT(2) - -/* Various constants */ - -/* Coalescing */ -#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 -#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL -#define MVPP2_TXDONE_COAL_USEC 1000 -#define MVPP2_RX_COAL_PKTS 32 -#define MVPP2_RX_COAL_USEC 64 - -/* The two-byte Marvell header. Either contains a special value used - * by Marvell switches when a specific hardware mode is enabled (not - * supported by this driver) or is filled automatically with zeroes on - * the RX side. Since those two bytes sit at the front of the Ethernet - * header, they automatically align the IP header on a 4-byte - * boundary: the hardware skips those two bytes on its - * own. - */ -#define MVPP2_MH_SIZE 2 -#define MVPP2_ETH_TYPE_LEN 2 -#define MVPP2_PPPOE_HDR_SIZE 8 -#define MVPP2_VLAN_TAG_LEN 4 -#define MVPP2_VLAN_TAG_EDSA_LEN 8 - -/* Lbtd 802.3 type */ -#define MVPP2_IP_LBDT_TYPE 0xfffa - -#define MVPP2_TX_CSUM_MAX_SIZE 9800 - -/* Timeout constants */ -#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 -#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 - -#define MVPP2_TX_MTU_MAX 0x7ffff - -/* Maximum number of T-CONTs of PON port */ -#define MVPP2_MAX_TCONT 16 - -/* Maximum number of supported ports */ -#define MVPP2_MAX_PORTS 4 - -/* Maximum number of TXQs used by single port */ -#define MVPP2_MAX_TXQ 8 - -/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO - * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data), - * multiply this value by two to count the maximum number of skb descs needed. - */ -#define MVPP2_MAX_TSO_SEGS 300 -#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
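
The descriptor budget above is worth making concrete. Below is a minimal, standalone sketch of the arithmetic, not driver code: the MVPP2_* values are copied from the defines above, and MAX_SKB_FRAGS is assumed to be 17, its usual value on 4 KiB-page kernels.

	#include <stdio.h>

	/* Local copies of the defines above; MAX_SKB_FRAGS is an assumption. */
	#define MAX_SKB_FRAGS       17
	#define MVPP2_MAX_TSO_SEGS  300
	#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

	int main(void)
	{
		/* One header descriptor plus one data descriptor per TSO
		 * segment, plus room for a non-TSO skb's page fragments. */
		printf("worst-case descriptors per skb: %d\n", MVPP2_MAX_SKB_DESCS);
		return 0; /* prints 617 */
	}
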
- -/* Default number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 4 - -/* Max number of Rx descriptors */ -#define MVPP2_MAX_RXD_MAX 1024 -#define MVPP2_MAX_RXD_DFLT 128 - -/* Max number of Tx descriptors */ -#define MVPP2_MAX_TXD_MAX 2048 -#define MVPP2_MAX_TXD_DFLT 1024 - -/* Number of Tx descriptors that can be reserved at once by a CPU */ -#define MVPP2_CPU_DESC_CHUNK 64 - -/* Max number of Tx descriptors in each aggregated queue */ -#define MVPP2_AGGR_TXQ_SIZE 256 - -/* Descriptor aligned size */ -#define MVPP2_DESC_ALIGNED_SIZE 32 - -/* Descriptor alignment mask */ -#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) - -/* RX FIFO constants */ -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 -#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 -#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 - -/* TX FIFO constants */ -#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa -#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 -#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 -#define MVPP2_TX_FIFO_THRESHOLD_10KB \ - (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) -#define MVPP2_TX_FIFO_THRESHOLD_3KB \ - (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) - -/* RX buffer constants */ -#define MVPP2_SKB_SHINFO_SIZE \ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - -#define MVPP2_RX_PKT_SIZE(mtu) \ - ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ - ETH_HLEN + ETH_FCS_LEN, cache_line_size()) - -#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) -#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) -#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ - ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) - -#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) - -/* IPv6 max L3 address size */ -#define MVPP2_MAX_L3_ADDR_SIZE 16 - -/* Port flags */ -#define MVPP2_F_LOOPBACK BIT(0) - -/* Marvell tag types */ -enum mvpp2_tag_type { - MVPP2_TAG_TYPE_NONE = 0, - MVPP2_TAG_TYPE_MH = 1, - MVPP2_TAG_TYPE_DSA = 2, - MVPP2_TAG_TYPE_EDSA = 3, - MVPP2_TAG_TYPE_VLAN = 4, - MVPP2_TAG_TYPE_LAST = 5 -}; - -/* Parser constants */ -#define MVPP2_PRS_TCAM_SRAM_SIZE 256 -#define MVPP2_PRS_TCAM_WORDS 6 -#define MVPP2_PRS_SRAM_WORDS 4 -#define MVPP2_PRS_FLOW_ID_SIZE 64 -#define MVPP2_PRS_FLOW_ID_MASK 0x3f -#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 -#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) -#define MVPP2_PRS_IPV4_HEAD 0x40 -#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 -#define MVPP2_PRS_IPV4_MC 0xe0 -#define MVPP2_PRS_IPV4_MC_MASK 0xf0 -#define MVPP2_PRS_IPV4_BC_MASK 0xff -#define MVPP2_PRS_IPV4_IHL 0x5 -#define MVPP2_PRS_IPV4_IHL_MASK 0xf -#define MVPP2_PRS_IPV6_MC 0xff -#define MVPP2_PRS_IPV6_MC_MASK 0xff -#define MVPP2_PRS_IPV6_HOP_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK 0xff -#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f -#define MVPP2_PRS_DBL_VLANS_MAX 100 -#define MVPP2_PRS_CAST_MASK BIT(0) -#define MVPP2_PRS_MCAST_VAL BIT(0) -#define MVPP2_PRS_UCAST_VAL 0x0
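
A note on how the IPv4 constants above pair up: the parser matches the very first IPv4 byte, which packs the version nibble and the IHL nibble, using MVPP2_PRS_IPV4_HEAD/MVPP2_PRS_IPV4_HEAD_MASK and MVPP2_PRS_IPV4_IHL/MVPP2_PRS_IPV4_IHL_MASK as two masked compares. A minimal standalone model, with the function name being illustrative rather than part of the driver:

	#include <stdbool.h>
	#include <stdint.h>

	/* Local copies of the parser constants above */
	#define MVPP2_PRS_IPV4_HEAD       0x40
	#define MVPP2_PRS_IPV4_HEAD_MASK  0xf0
	#define MVPP2_PRS_IPV4_IHL        0x5
	#define MVPP2_PRS_IPV4_IHL_MASK   0xf

	/* TCAM-style masked compare of the first IPv4 byte (version | IHL).
	 * 0x45 is a plain 20-byte header; 0x46..0x4f carry options. */
	static bool ipv4_no_options(uint8_t first_byte)
	{
		return (first_byte & MVPP2_PRS_IPV4_HEAD_MASK) == MVPP2_PRS_IPV4_HEAD &&
		       (first_byte & MVPP2_PRS_IPV4_IHL_MASK) == MVPP2_PRS_IPV4_IHL;
	}
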
- -/* Tcam structure: - * - lookup ID - 4 bits - * - port ID - 1 byte - * - additional information - 1 byte - * - header data - 8 bytes - * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). - */ -#define MVPP2_PRS_AI_BITS 8 -#define MVPP2_PRS_PORT_MASK 0xff -#define MVPP2_PRS_LU_MASK 0xf -#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ - (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) -#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ - (((offs) * 2) - ((offs) % 2) + 2) -#define MVPP2_PRS_TCAM_AI_BYTE 16 -#define MVPP2_PRS_TCAM_PORT_BYTE 17 -#define MVPP2_PRS_TCAM_LU_BYTE 20 -#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) -#define MVPP2_PRS_TCAM_INV_WORD 5 - -#define MVPP2_PRS_VID_TCAM_BYTE 2 - -/* TCAM range for unicast and multicast filtering. We have 25 entries per port, - * with 4 dedicated to UC filtering and the rest to multicast filtering. - * Additionally we reserve one entry for the broadcast address, and one for - * each port's own address. - */ -#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 -#define MVPP2_PRS_MAC_RANGE_SIZE 80 - -/* Number of entries per port dedicated to UC and MC filtering */ -#define MVPP2_PRS_MAC_UC_FILT_MAX 4 -#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ - MVPP2_PRS_MAC_UC_FILT_MAX) - -/* There is a TCAM range reserved for VLAN filtering entries, with a range size of 33: - * 10 VLAN ID filter entries per port - * 1 default VLAN filter entry per port - * It is assumed that there are 3 ports for filtering, not including the loopback port - */ -#define MVPP2_PRS_VLAN_FILT_MAX 11 -#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 - -#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) -#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) - -/* Tcam entries ID */ -#define MVPP2_PE_DROP_ALL 0 -#define MVPP2_PE_FIRST_FREE_TID 1 - -/* MAC filtering range */ -#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) -#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ - MVPP2_PRS_MAC_RANGE_SIZE + 1) -/* VLAN filtering range */ -#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) -#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ - MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) -#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) -#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) -#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) -#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) -#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) -#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) -#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) -#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) -#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) -#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) -#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) -#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) -#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) -#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) -#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) -#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) -#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) -#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) -#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) -#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) -#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) -#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) -#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) -/* reserved */ -#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
-#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) -#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) - -#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ - ((port) * MVPP2_PRS_VLAN_FILT_MAX)) -#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) -/* Index of default vid filter for given port */ -#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ - + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) - -/* Sram structure - * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0). - */ -#define MVPP2_PRS_SRAM_RI_OFFS 0 -#define MVPP2_PRS_SRAM_RI_WORD 0 -#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 -#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 -#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 -#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 -#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 -#define MVPP2_PRS_SRAM_UDF_OFFS 73 -#define MVPP2_PRS_SRAM_UDF_BITS 8 -#define MVPP2_PRS_SRAM_UDF_MASK 0xff -#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 -#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 -#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 -#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 -#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 -#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 -#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 -#define MVPP2_PRS_SRAM_AI_OFFS 90 -#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 -#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 -#define MVPP2_PRS_SRAM_AI_MASK 0xff -#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 -#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf -#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 -#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 - -/* Sram result info bits assignment */ -#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 -#define MVPP2_PRS_RI_DSA_MASK 0x2 -#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_VLAN_NONE 0x0 -#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) -#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) -#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) -#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 -#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) -#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) -#define MVPP2_PRS_RI_L2_UCAST 0x0 -#define MVPP2_PRS_RI_L2_MCAST BIT(9) -#define MVPP2_PRS_RI_L2_BCAST BIT(10) -#define MVPP2_PRS_RI_PPPOE_MASK 0x800 -#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_UN 0x0 -#define MVPP2_PRS_RI_L3_IP4 BIT(12) -#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) -#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) -#define MVPP2_PRS_RI_L3_IP6 BIT(14) -#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) -#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) -#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_L3_UCAST 0x0 -#define MVPP2_PRS_RI_L3_MCAST BIT(15) -#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) -#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 -#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) -#define MVPP2_PRS_RI_UDF3_MASK 0x300000 -#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) -#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 -#define MVPP2_PRS_RI_L4_TCP BIT(22) -#define MVPP2_PRS_RI_L4_UDP BIT(23)
-#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) -#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 -#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) -#define MVPP2_PRS_RI_DROP_MASK 0x80000000 - -/* Sram additional info bits assignment */ -#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) -#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) -#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) -#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) -#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) -#define MVPP2_PRS_SINGLE_VLAN_AI 0 -#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) -#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) - -/* DSA/EDSA type */ -#define MVPP2_PRS_TAGGED true -#define MVPP2_PRS_UNTAGGED false -#define MVPP2_PRS_EDSA true -#define MVPP2_PRS_DSA false - -/* MAC entries, shadow udf */ -enum mvpp2_prs_udf { - MVPP2_PRS_UDF_MAC_DEF, - MVPP2_PRS_UDF_MAC_RANGE, - MVPP2_PRS_UDF_L2_DEF, - MVPP2_PRS_UDF_L2_DEF_COPY, - MVPP2_PRS_UDF_L2_USER, -}; - -/* Lookup ID */ -enum mvpp2_prs_lookup { - MVPP2_PRS_LU_MH, - MVPP2_PRS_LU_MAC, - MVPP2_PRS_LU_DSA, - MVPP2_PRS_LU_VLAN, - MVPP2_PRS_LU_VID, - MVPP2_PRS_LU_L2, - MVPP2_PRS_LU_PPPOE, - MVPP2_PRS_LU_IP4, - MVPP2_PRS_LU_IP6, - MVPP2_PRS_LU_FLOWS, - MVPP2_PRS_LU_LAST, -}; - -/* L2 cast enum */ -enum mvpp2_prs_l2_cast { - MVPP2_PRS_L2_UNI_CAST, - MVPP2_PRS_L2_MULTI_CAST, -}; - -/* L3 cast enum */ -enum mvpp2_prs_l3_cast { - MVPP2_PRS_L3_UNI_CAST, - MVPP2_PRS_L3_MULTI_CAST, - MVPP2_PRS_L3_BROAD_CAST -}; - -/* Classifier constants */ -#define MVPP2_CLS_FLOWS_TBL_SIZE 512 -#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 -#define MVPP2_CLS_LKP_TBL_SIZE 64 -#define MVPP2_CLS_RX_QUEUES 256 - -/* RSS constants */ -#define MVPP22_RSS_TABLE_ENTRIES 32 - -/* BM constants */ -#define MVPP2_BM_JUMBO_BUF_NUM 512 -#define MVPP2_BM_LONG_BUF_NUM 1024 -#define MVPP2_BM_SHORT_BUF_NUM 2048 -#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) -#define MVPP2_BM_POOL_PTR_ALIGN 128 - -/* BM cookie (32 bits) definition */ -#define MVPP2_BM_COOKIE_POOL_OFFS 8 -#define MVPP2_BM_COOKIE_CPU_OFFS 24 - -#define MVPP2_BM_SHORT_FRAME_SIZE 512 -#define MVPP2_BM_LONG_FRAME_SIZE 2048 -#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 -/* BM short pool packet size - * These values ensure that for SWF the total number - * of bytes allocated for each buffer will be 512 - */ -#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) -#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) -#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) - -#define MVPP21_ADDR_SPACE_SZ 0 -#define MVPP22_ADDR_SPACE_SZ SZ_64K - -#define MVPP2_MAX_THREADS 8 -#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS - -enum mvpp2_bm_pool_log_num { - MVPP2_BM_SHORT, - MVPP2_BM_LONG, - MVPP2_BM_JUMBO, - MVPP2_BM_POOLS_NUM -}; - -static struct { - int pkt_size; - int buf_num; -} mvpp2_pools[MVPP2_BM_POOLS_NUM]; - -/* GMAC MIB Counters register definitions */ -#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 -#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 -#define MVPP22_MIB_COUNTERS_OFFSET 0x0 -#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 - -#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 -#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 -#define MVPP2_MIB_CRC_ERRORS_SENT 0xc -#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 -#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 -#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c -#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 -#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 -#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 -#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
-#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 -#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 -#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 -#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 -#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 -#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c -#define MVPP2_MIB_FC_SENT 0x54 -#define MVPP2_MIB_FC_RCVD 0x58 -#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c -#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 -#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 -#define MVPP2_MIB_OVERSIZE_RCVD 0x68 -#define MVPP2_MIB_JABBER_RCVD 0x6c -#define MVPP2_MIB_MAC_RCV_ERROR 0x70 -#define MVPP2_MIB_BAD_CRC_EVENT 0x74 -#define MVPP2_MIB_COLLISION 0x78 -#define MVPP2_MIB_LATE_COLLISION 0x7c - -#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) - -#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) - -/* Definitions */ - -/* Shared Packet Processor resources */ -struct mvpp2 { - /* Shared registers' base addresses */ - void __iomem *lms_base; - void __iomem *iface_base; - - /* On PPv2.2, each "software thread" can access the base - * register through a separate address space, each 64 KB apart - * from each other. Typically, such address spaces will be - * used per CPU. - */ - void __iomem *swth_base[MVPP2_MAX_THREADS]; - - /* On PPv2.2, some port control registers are located in the system - * controller space. These registers are accessible through a regmap. - */ - struct regmap *sysctrl_base; - - /* Common clocks */ - struct clk *pp_clk; - struct clk *gop_clk; - struct clk *mg_clk; - struct clk *mg_core_clk; - struct clk *axi_clk; - - /* List of pointers to port structures */ - int port_count; - struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; - - /* Aggregated TXQs */ - struct mvpp2_tx_queue *aggr_txqs; - - /* BM pools */ - struct mvpp2_bm_pool *bm_pools; - - /* PRS shadow table */ - struct mvpp2_prs_shadow *prs_shadow; - /* PRS auxiliary table for double vlan entries control */ - bool *prs_double_vlans; - - /* Tclk value */ - u32 tclk; - - /* HW version */ - enum { MVPP21, MVPP22 } hw_version; - - /* Maximum number of RXQs per port */ - unsigned int max_port_rxqs; - - /* Workqueue to gather hardware statistics */ - char queue_name[30]; - struct workqueue_struct *stats_queue; -}; - -struct mvpp2_pcpu_stats { - struct u64_stats_sync syncp; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; -}; - -/* Per-CPU port control */ -struct mvpp2_port_pcpu { - struct hrtimer tx_done_timer; - bool timer_scheduled; - /* Tasklet for egress finalization */ - struct tasklet_struct tx_done_tasklet; -}; - -struct mvpp2_queue_vector { - int irq; - struct napi_struct napi; - enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; - int sw_thread_id; - u16 sw_thread_mask; - int first_rxq; - int nrxqs; - u32 pending_cause_rx; - struct mvpp2_port *port; -}; - -struct mvpp2_port { - u8 id; - - /* Index of the port from the "group of ports" complex point - * of view - */ - int gop_id; - - int link_irq; - - struct mvpp2 *priv; - - /* Firmware node associated to the port */ - struct fwnode_handle *fwnode; - - /* Per-port registers' base address */ - void __iomem *base; - void __iomem *stats_base; - - struct mvpp2_rx_queue **rxqs; - unsigned int nrxqs; - struct mvpp2_tx_queue **txqs; - unsigned int ntxqs; - struct net_device *dev; - - int pkt_size; - - /* Per-CPU port control */ - struct mvpp2_port_pcpu __percpu *pcpu; - - /* Flags */ - unsigned long flags; - - u16 tx_ring_size; - u16 rx_ring_size; - struct mvpp2_pcpu_stats __percpu *stats; - u64 *ethtool_stats; - - /* Per-port work and its lock to gather hardware 
statistics */ - struct mutex gather_stats_lock; - struct delayed_work stats_work; - - phy_interface_t phy_interface; - struct device_node *phy_node; - struct phy *comphy; - unsigned int link; - unsigned int duplex; - unsigned int speed; - - struct mvpp2_bm_pool *pool_long; - struct mvpp2_bm_pool *pool_short; - - /* Index of first port's physical RXQ */ - u8 first_rxq; - - struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; - unsigned int nqvecs; - bool has_tx_irqs; - - u32 tx_time_coal; -}; - -/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the - * layout of the transmit and reception DMA descriptors, and their - * layout is therefore defined by the hardware design - */ - -#define MVPP2_TXD_L3_OFF_SHIFT 0 -#define MVPP2_TXD_IP_HLEN_SHIFT 8 -#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) -#define MVPP2_TXD_L4_CSUM_NOT BIT(14) -#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) -#define MVPP2_TXD_PADDING_DISABLE BIT(23) -#define MVPP2_TXD_L4_UDP BIT(24) -#define MVPP2_TXD_L3_IP6 BIT(26) -#define MVPP2_TXD_L_DESC BIT(28) -#define MVPP2_TXD_F_DESC BIT(29) - -#define MVPP2_RXD_ERR_SUMMARY BIT(15) -#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) -#define MVPP2_RXD_ERR_CRC 0x0 -#define MVPP2_RXD_ERR_OVERRUN BIT(13) -#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) -#define MVPP2_RXD_BM_POOL_ID_OFFS 16 -#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) -#define MVPP2_RXD_HWF_SYNC BIT(21) -#define MVPP2_RXD_L4_CSUM_OK BIT(22) -#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) -#define MVPP2_RXD_L4_TCP BIT(25) -#define MVPP2_RXD_L4_UDP BIT(26) -#define MVPP2_RXD_L3_IP4 BIT(28) -#define MVPP2_RXD_L3_IP6 BIT(30) -#define MVPP2_RXD_BUF_HDR BIT(31) - -/* HW TX descriptor for PPv2.1 */ -struct mvpp21_tx_desc { - u32 command; /* Options used by HW for packet transmitting.*/ - u8 packet_offset; /* the offset from the buffer beginning */ - u8 phys_txq; /* destination queue ID */ - u16 data_size; /* data size of transmitted packet in bytes */ - u32 buf_dma_addr; /* physical addr of transmitted buffer */ - u32 buf_cookie; /* cookie for access to TX buffer in tx path */ - u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ - u32 reserved2; /* reserved (for future use) */ -}; - -/* HW RX descriptor for PPv2.1 */ -struct mvpp21_rx_desc { - u32 status; /* info about received packet */ - u16 reserved1; /* parser_info (for future use, PnC) */ - u16 data_size; /* size of received packet in bytes */ - u32 buf_dma_addr; /* physical address of the buffer */ - u32 buf_cookie; /* cookie for access to RX buffer in rx path */ - u16 reserved2; /* gem_port_id (for future use, PON) */ - u16 reserved3; /* csum_l4 (for future use, PnC) */ - u8 reserved4; /* bm_qset (for future use, BM) */ - u8 reserved5; - u16 reserved6; /* classify_info (for future use, PnC) */ - u32 reserved7; /* flow_id (for future use, PnC) */ - u32 reserved8; -}; - -/* HW TX descriptor for PPv2.2 */ -struct mvpp22_tx_desc { - u32 command; - u8 packet_offset; - u8 phys_txq; - u16 data_size; - u64 reserved1; - u64 buf_dma_addr_ptp; - u64 buf_cookie_misc; -}; - -/* HW RX descriptor for PPv2.2 */ -struct mvpp22_rx_desc { - u32 status; - u16 reserved1; - u16 data_size; - u32 reserved2; - u32 reserved3; - u64 buf_dma_addr_key_hash; - u64 buf_cookie_misc; -}; - -/* Opaque type used by the driver to manipulate the HW TX and RX - * descriptors - */ -struct mvpp2_tx_desc { - union { - struct mvpp21_tx_desc pp21; - struct mvpp22_tx_desc pp22; - }; -}; - -struct mvpp2_rx_desc { - union { - struct mvpp21_rx_desc pp21; - struct mvpp22_rx_desc pp22; - }; -}; 
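
The mvpp2_tx_desc/mvpp2_rx_desc unions above are deliberately opaque: hot-path code goes through the mvpp2_txdesc_*/mvpp2_rxdesc_* accessors defined further down instead of touching pp21/pp22 directly. On PPv2.2 those accessors also have to mask, because the 64-bit descriptor words mix a 40-bit DMA address with unrelated high bits. A standalone model of that masking, where DESC_DMA_MASK mirrors MVPP2_DESC_DMA_MASK = DMA_BIT_MASK(40) and the sample values are made up:

	#include <stdint.h>
	#include <stdio.h>

	/* 40-bit descriptor DMA mask, mirroring MVPP2_DESC_DMA_MASK */
	#define DESC_DMA_MASK (((uint64_t)1 << 40) - 1)

	int main(void)
	{
		/* buf_dma_addr_ptp mixes a 40-bit address with PTP bits above
		 * it, so the accessors mask and merge rather than assign. */
		uint64_t desc_word = 0xabcd000000000000ULL; /* pretend PTP bits */
		uint64_t dma_addr  = 0x0012345678ULL;       /* fits in 40 bits */

		desc_word = (desc_word & ~DESC_DMA_MASK) | (dma_addr & DESC_DMA_MASK);
		printf("buf_dma_addr_ptp = 0x%016llx\n",
		       (unsigned long long)desc_word);
		return 0;
	}
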
- -struct mvpp2_txq_pcpu_buf { - /* Transmitted SKB */ - struct sk_buff *skb; - - /* Physical address of transmitted buffer */ - dma_addr_t dma; - - /* Size transmitted */ - size_t size; -}; - -/* Per-CPU Tx queue control */ -struct mvpp2_txq_pcpu { - int cpu; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the - * descriptor ring - */ - int count; - - int wake_threshold; - int stop_threshold; - - /* Number of Tx DMA descriptors reserved for each CPU */ - int reserved_num; - - /* Infos about transmitted buffers */ - struct mvpp2_txq_pcpu_buf *buffs; - - /* Index of last TX DMA descriptor that was inserted */ - int txq_put_index; - - /* Index of the TX DMA descriptor to be cleaned up */ - int txq_get_index; - - /* DMA buffer for TSO headers */ - char *tso_headers; - dma_addr_t tso_headers_dma; -}; - -struct mvpp2_tx_queue { - /* Physical number of this Tx queue */ - u8 id; - - /* Logical number of this Tx queue */ - u8 log_id; - - /* Number of Tx DMA descriptors in the descriptor ring */ - int size; - - /* Number of currently used Tx DMA descriptor in the descriptor ring */ - int count; - - /* Per-CPU control of physical Tx queues */ - struct mvpp2_txq_pcpu __percpu *pcpu; - - u32 done_pkts_coal; - - /* Virtual address of the Tx DMA descriptors array */ - struct mvpp2_tx_desc *descs; - - /* DMA address of the Tx DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last Tx DMA descriptor */ - int last_desc; - - /* Index of the next Tx DMA descriptor to process */ - int next_desc_to_proc; -}; - -struct mvpp2_rx_queue { - /* RX queue number, in the range 0-31 for physical RXQs */ - u8 id; - - /* Num of rx descriptors in the rx descriptor ring */ - int size; - - u32 pkts_coal; - u32 time_coal; - - /* Virtual address of the RX DMA descriptors array */ - struct mvpp2_rx_desc *descs; - - /* DMA address of the RX DMA descriptors array */ - dma_addr_t descs_dma; - - /* Index of the last RX DMA descriptor */ - int last_desc; - - /* Index of the next RX DMA descriptor to process */ - int next_desc_to_proc; - - /* ID of port to which physical RXQ is mapped */ - int port; - - /* Port's logic RXQ number to which physical RXQ is mapped */ - int logic_rxq; -}; - -union mvpp2_prs_tcam_entry { - u32 word[MVPP2_PRS_TCAM_WORDS]; - u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; -}; - -union mvpp2_prs_sram_entry { - u32 word[MVPP2_PRS_SRAM_WORDS]; - u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; -}; - -struct mvpp2_prs_entry { - u32 index; - union mvpp2_prs_tcam_entry tcam; - union mvpp2_prs_sram_entry sram; -}; - -struct mvpp2_prs_shadow { - bool valid; - bool finish; - - /* Lookup ID */ - int lu; - - /* User defined offset */ - int udf; - - /* Result info */ - u32 ri; - u32 ri_mask; -}; - -struct mvpp2_cls_flow_entry { - u32 index; - u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; -}; - -struct mvpp2_cls_lookup_entry { - u32 lkpid; - u32 way; - u32 data; -}; - -struct mvpp2_bm_pool { - /* Pool number in the range 0-7 */ - int id; - - /* Buffer Pointers Pool External (BPPE) size */ - int size; - /* BPPE size in bytes */ - int size_bytes; - /* Number of buffers for this pool */ - int buf_num; - /* Pool buffer size */ - int buf_size; - /* Packet size */ - int pkt_size; - int frag_size; - - /* BPPE virtual base address */ - u32 *virt_addr; - /* BPPE DMA base address */ - dma_addr_t dma_addr; - - /* Ports using BM pool */ - u32 port_map; -}; - -#define IS_TSO_HEADER(txq_pcpu, addr) \ - ((addr) >= (txq_pcpu)->tso_headers_dma && \ - (addr) < (txq_pcpu)->tso_headers_dma + \ - (txq_pcpu)->size * TSO_HEADER_SIZE)
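
IS_TSO_HEADER() exists because TSO headers come from one per-queue DMA block mapped at setup time, not from per-packet mappings; on cleanup, addresses inside that block must not be handed to dma_unmap_single(). A standalone model of the range check, with the struct and names being illustrative and TSO_HEADER_SIZE assumed to be 128 as in <net/tso.h>:

	#include <stdbool.h>
	#include <stdint.h>

	#define TSO_HEADER_SIZE 128 /* assumption; real value from <net/tso.h> */

	/* Reduced model of the per-CPU queue state used by IS_TSO_HEADER() */
	struct txq_model {
		uint64_t tso_headers_dma; /* base of the TSO header block */
		int size;                 /* ring size, one header slot per desc */
	};

	/* Mirror of IS_TSO_HEADER(): an address inside the pre-allocated
	 * header block was not mapped per-packet, so cleanup skips unmap. */
	static bool is_tso_header(const struct txq_model *q, uint64_t addr)
	{
		return addr >= q->tso_headers_dma &&
		       addr <  q->tso_headers_dma + (uint64_t)q->size * TSO_HEADER_SIZE;
	}
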
- -/* Queue modes */ -#define MVPP2_QDIST_SINGLE_MODE 0 -#define MVPP2_QDIST_MULTI_MODE 1 - -static int queue_mode = MVPP2_QDIST_SINGLE_MODE; - -module_param(queue_mode, int, 0444); -MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); - -#define MVPP2_DRIVER_NAME "mvpp2" -#define MVPP2_DRIVER_VERSION "1.0" - -/* Utility/helper methods */ - -static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) -{ - writel(data, priv->swth_base[0] + offset); -} - -static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) -{ - return readl(priv->swth_base[0] + offset); -} - -static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) -{ - return readl_relaxed(priv->swth_base[0] + offset); -} -/* These accessors should be used to access: - * - * - per-CPU registers, where each CPU has its own copy of the - * register. - * - * MVPP2_BM_VIRT_ALLOC_REG - * MVPP2_BM_ADDR_HIGH_ALLOC - * MVPP22_BM_ADDR_HIGH_RLS_REG - * MVPP2_BM_VIRT_RLS_REG - * MVPP2_ISR_RX_TX_CAUSE_REG - * MVPP2_ISR_RX_TX_MASK_REG - * MVPP2_TXQ_NUM_REG - * MVPP2_AGGR_TXQ_UPDATE_REG - * MVPP2_TXQ_RSVD_REQ_REG - * MVPP2_TXQ_RSVD_RSLT_REG - * MVPP2_TXQ_SENT_REG - * MVPP2_RXQ_NUM_REG - * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU - * register - * - * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) - * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) - * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) - * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) - * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) - */ -static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, - u32 offset, u32 data) -{ - writel(data, priv->swth_base[cpu] + offset); -} - -static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, - u32 offset) -{ - return readl(priv->swth_base[cpu] + offset); -} - -static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, - u32 offset, u32 data) -{ - writel_relaxed(data, priv->swth_base[cpu] + offset); -} - -static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, - u32 offset) -{ - return readl_relaxed(priv->swth_base[cpu] + offset); -} - -static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.buf_dma_addr; - else - return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; -} - -static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - dma_addr_t dma_addr) -{ - dma_addr_t addr, offset; - - addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; - offset = dma_addr & MVPP2_TX_DESC_ALIGN; - - if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = addr; - tx_desc->pp21.packet_offset = offset; - } else { - u64 val = (u64)addr; - - tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; - tx_desc->pp22.buf_dma_addr_ptp |= val; - tx_desc->pp22.packet_offset = offset; - } -} - -static size_t 
mvpp2_txdesc_size_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.data_size; - else - return tx_desc->pp22.data_size; -} - -static void mvpp2_txdesc_size_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - size_t size) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.data_size = size; - else - tx_desc->pp22.data_size = size; -} - -static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - unsigned int txq) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.phys_txq = txq; - else - tx_desc->pp22.phys_txq = txq; -} - -static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - unsigned int command) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.command = command; - else - tx_desc->pp22.command = command; -} - -static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc) -{ - if (port->priv->hw_version == MVPP21) - return tx_desc->pp21.packet_offset; - else - return tx_desc->pp22.packet_offset; -} - -static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_dma_addr; - else - return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; -} - -static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.buf_cookie; - else - return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; -} - -static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.data_size; - else - return rx_desc->pp22.data_size; -} - -static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - if (port->priv->hw_version == MVPP21) - return rx_desc->pp21.status; - else - return rx_desc->pp22.status; -} - -static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) -{ - txq_pcpu->txq_get_index++; - if (txq_pcpu->txq_get_index == txq_pcpu->size) - txq_pcpu->txq_get_index = 0; -} - -static void mvpp2_txq_inc_put(struct mvpp2_port *port, - struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb, - struct mvpp2_tx_desc *tx_desc) -{ - struct mvpp2_txq_pcpu_buf *tx_buf = - txq_pcpu->buffs + txq_pcpu->txq_put_index; - tx_buf->skb = skb; - tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); - tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + - mvpp2_txdesc_offset_get(port, tx_desc); - txq_pcpu->txq_put_index++; - if (txq_pcpu->txq_put_index == txq_pcpu->size) - txq_pcpu->txq_put_index = 0; -} - -/* Get number of physical egress port */ -static inline int mvpp2_egress_port(struct mvpp2_port *port) -{ - return MVPP2_MAX_TCONT + port->id; -} - -/* Get number of physical TXQ */ -static inline int mvpp2_txq_phys(int port, int txq) -{ - return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; -} - -/* Parser configuration routines */ - -/* Update parser tcam and sram hw entries */ -static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) -{ - int i; - - if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - /* Clear entry invalidation bit */ - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - for 
(i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); - - return 0; -} - -/* Initialize tcam entry from hw */ -static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, - struct mvpp2_prs_entry *pe, int tid) -{ - int i; - - if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) - return -EINVAL; - - memset(pe, 0, sizeof(*pe)); - pe->index = tid; - - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - - pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, - MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); - if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) - return MVPP2_PRS_TCAM_ENTRY_INVALID; - - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); - - /* Write sram index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); - - return 0; -} - -/* Invalidate tcam hw entry */ -static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) -{ - /* Write index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), - MVPP2_PRS_TCAM_INV_MASK); -} - -/* Enable shadow table entry and set its lookup ID */ -static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) -{ - priv->prs_shadow[index].valid = true; - priv->prs_shadow[index].lu = lu; -} - -/* Update ri fields in shadow table entry */ -static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, - unsigned int ri, unsigned int ri_mask) -{ - priv->prs_shadow[index].ri_mask = ri_mask; - priv->prs_shadow[index].ri = ri; -} - -/* Update lookup field in tcam sw entry */ -static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; - pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; -} - -/* Update mask for single port in tcam sw entry */ -static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, - unsigned int port, bool add) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - if (add) - pe->tcam.byte[enable_off] &= ~(1 << port); - else - pe->tcam.byte[enable_off] |= 1 << port; -} - -/* Update port map in tcam sw entry */ -static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, - unsigned int ports) -{ - unsigned char port_mask = MVPP2_PRS_PORT_MASK; - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; - pe->tcam.byte[enable_off] &= ~port_mask; - pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; -} - -/* Obtain port map from tcam sw entry */ -static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) -{ - int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); - - return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; -} - -/* Set byte of data and its enable bits in tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char byte, - unsigned char enable) -{ - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] 
= byte; - pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; -} - -/* Get byte of data and its enable bits from tcam sw entry */ -static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, - unsigned int offs, unsigned char *byte, - unsigned char *enable) -{ - *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; - *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; -} - -/* Compare tcam data bytes with a pattern */ -static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, - u16 data) -{ - int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); - u16 tcam_data; - - tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; - if (tcam_data != data) - return false; - return true; -} - -/* Update ai bits in tcam sw entry */ -static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int enable) -{ - int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; - - for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { - - if (!(enable & BIT(i))) - continue; - - if (bits & BIT(i)) - pe->tcam.byte[ai_idx] |= 1 << i; - else - pe->tcam.byte[ai_idx] &= ~(1 << i); - } - - pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; -} - -/* Get ai bits from tcam sw entry */ -static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) -{ - return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; -} - -/* Set ethertype in tcam sw entry */ -static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, - unsigned short ethertype) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); -} - -/* Set vid in tcam sw entry */ -static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, - unsigned short vid) -{ - mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); - mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); -} - -/* Set bits in sram sw entry */ -static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); -} - -/* Clear bits in sram sw entry */ -static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, - int val) -{ - pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); -} - -/* Update ri bits in sram sw entry */ -static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - - for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { - int ri_off = MVPP2_PRS_SRAM_RI_OFFS; - - if (!(mask & BIT(i))) - continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); - } -} - -/* Obtain ri bits from sram sw entry */ -static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe) -{ - return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD]; -} - -/* Update ai bits in sram sw entry */ -static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, - unsigned int bits, unsigned int mask) -{ - unsigned int i; - int ai_off = MVPP2_PRS_SRAM_AI_OFFS; - - for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { - - if (!(mask & BIT(i))) - continue; - - if (bits & BIT(i)) - mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); - else - mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); - - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); - } -} - -/* Read ai bits from sram sw entry */ -static int mvpp2_prs_sram_ai_get(struct 
mvpp2_prs_entry *pe) -{ - u8 bits; - int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); - int ai_en_off = ai_off + 1; - int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; - - bits = (pe->sram.byte[ai_off] >> ai_shift) | - (pe->sram.byte[ai_en_off] << (8 - ai_shift)); - - return bits; -} - -/* In sram sw entry set lookup ID field of the tcam key to be used in the next - * lookup iteration - */ -static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, - unsigned int lu) -{ - int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; - - mvpp2_prs_sram_bits_clear(pe, sram_next_off, - MVPP2_PRS_SRAM_NEXT_LU_MASK); - mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); -} - -/* In the sram sw entry set sign and value of the next lookup offset - * and the offset value generated to the classifier - */ -static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, - unsigned int op) -{ - /* Set sign */ - if (shift < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - shift = 0 - shift; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); - } - - /* Set value */ - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = - (unsigned char)shift; - - /* Reset and set operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -} - -/* In the sram sw entry set sign and value of the user defined offset - * generated to the classifier - */ -static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, - unsigned int type, int offset, - unsigned int op) -{ - /* Set sign */ - if (offset < 0) { - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - offset = 0 - offset; - } else { - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); - } - - /* Set value */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, - MVPP2_PRS_SRAM_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + - MVPP2_PRS_SRAM_UDF_BITS)] |= - (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); - - /* Set offset type */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, - MVPP2_PRS_SRAM_UDF_TYPE_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); - - /* Set offset operation */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, - MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); - mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= - ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> - (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + - MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= - (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); - - /* Set base offset as current */ - mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); -}
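
All of the SRAM helpers above reduce to the same packing trick: a field living at an arbitrary bit offset inside the 128-bit SRAM image is addressed as byte = offset / 8 with shift = offset % 8, and a multi-bit field that straddles a byte boundary gets its upper part OR-ed into the next byte with the complementary 8 - offset % 8 shift, as mvpp2_prs_sram_offset_set() does explicitly. A standalone model of the single-bit case, with the union mirroring the sram entry layout:

	#include <stdint.h>

	#define MVPP2_PRS_SRAM_WORDS   4
	#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
	#define MVPP2_PRS_SRAM_AI_OFFS 90

	/* Reduced model of the 128-bit SRAM image built by the helpers above */
	union sram_image {
		uint32_t word[MVPP2_PRS_SRAM_WORDS];
		uint8_t  byte[MVPP2_PRS_SRAM_WORDS * 4];
	};

	/* Same packing as mvpp2_prs_sram_bits_set(): AI bit 0 at offset 90
	 * lands in byte 11 (90 / 8), shifted left by 2 (90 % 8). */
	static void sram_bits_set(union sram_image *s, int bit_num, int val)
	{
		s->byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (uint8_t)(val << (bit_num % 8));
	}
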
- -/* Find parser flow entry */ -static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through all the entries with MVPP2_PRS_LU_FLOWS */ - for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { - u8 bits; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - bits = mvpp2_prs_sram_ai_get(&pe); - - /* The SRAM stores the classification lookup ID in AI bits [5:0] */ - if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) - return tid; - } - - return -ENOENT; -} - -/* Return first free tcam index, seeking from start to end */ -static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, - unsigned char end) -{ - int tid; - - if (start > end) - swap(start, end); - - if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) - end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; - - for (tid = start; tid <= end; tid++) { - if (!priv->prs_shadow[tid].valid) - return tid; - } - - return -EINVAL; -} - -/* Enable/disable dropping all mac da's */ -static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) -{ - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { - /* Entry exists - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); - } else { - /* Entry doesn't exist - create a new one */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = MVPP2_PE_DROP_ALL; - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set port to unicast or multicast promiscuous mode */ -static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) -{ - struct mvpp2_prs_entry pe; - unsigned char cast_match; - unsigned int ri; - int tid; - - if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { - cast_match = MVPP2_PRS_UCAST_VAL; - tid = MVPP2_PE_MAC_UC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_UCAST; - } else { - cast_match = MVPP2_PRS_MCAST_VAL; - tid = MVPP2_PE_MAC_MC_PROMISCUOUS; - ri = MVPP2_PRS_RI_L2_MCAST; - } - - /* Promiscuous mode - accept unknown unicast or multicast packets */ - if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - pe.index = tid; - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set result info bits */ - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK); - - /* Match UC or MC addresses */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match, - MVPP2_PRS_CAST_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -}
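
mvpp2_prs_mac_promisc_set() above can get away with a single TCAM byte because Ethernet encodes unicast versus group addressing in one bit: the I/G bit, bit 0 of the first destination-MAC octet, which is exactly what MVPP2_PRS_CAST_MASK selects. A standalone model, with the function name being illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	/* Local copies of the cast-match constants defined earlier */
	#define MVPP2_PRS_CAST_MASK  0x01 /* BIT(0) of DA byte 0, the I/G bit */
	#define MVPP2_PRS_MCAST_VAL  0x01
	#define MVPP2_PRS_UCAST_VAL  0x00

	/* The promiscuous entries match only this one bit of byte 0 of the
	 * destination MAC: group (multicast) addresses have it set. */
	static bool is_l2_multicast(const uint8_t *da)
	{
		return (da[0] & MVPP2_PRS_CAST_MASK) == MVPP2_PRS_MCAST_VAL;
	}
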
MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exists - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, 0, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - - /* Set ai bits for next iteration */ - if (extend) - mvpp2_prs_sram_ai_update(&pe, 1, - MVPP2_PRS_SRAM_AI_MASK); - else - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - - /* If the packet is tagged, continue with VID filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - } else { - /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */ - mvpp2_prs_sram_shift_set(&pe, shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Set entry for dsa ethertype */ -static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, - bool add, bool tagged, bool extend) -{ - struct mvpp2_prs_entry pe; - int tid, shift, port_mask; - - if (extend) { - tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : - MVPP2_PE_ETYPE_EDSA_UNTAGGED; - port_mask = 0; - shift = 8; - } else { - tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED : - MVPP2_PE_ETYPE_DSA_UNTAGGED; - port_mask = MVPP2_PRS_PORT_MASK; - shift = 4; - } - - if (priv->prs_shadow[tid].valid) { - /* Entry exists - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); - } else { - /* Entry doesn't exist - create new */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = tid; - - /* Set ethertype */ - mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA); - mvpp2_prs_match_etype(&pe, 2, 0); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK, - MVPP2_PRS_RI_DSA_MASK); - /* Shift ethertype + 2 byte reserved + tag */ - mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA); - - if (tagged) { - /* Set tagged bit in DSA tag */ - mvpp2_prs_tcam_data_byte_set(&pe, - MVPP2_ETH_TYPE_LEN + 2 + 3, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT, - MVPP2_PRS_TCAM_DSA_TAGGED_BIT); - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, - MVPP2_PRS_SRAM_AI_MASK); - /* If the packet is tagged, continue checking VLANs */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - } else { - /* Set result info bits to 'no vlans' */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - } - /* Mask/unmask all ports, depending on dsa type */ - mvpp2_prs_tcam_port_map_set(&pe, port_mask); - } - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port, add); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Search for existing single/triple vlan entry */ -static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through all the
entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_bits, ai_bits; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid)); - if (!match) - continue; - - /* Get vlan type */ - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - - /* Get current ai value from tcam */ - ai_bits = mvpp2_prs_tcam_ai_get(&pe); - /* Clear double vlan bit */ - ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; - - if (ai != ai_bits) - continue; - - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - return tid; - } - - return -ENOENT; -} - -/* Add/update single/triple vlan entry */ -static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, - unsigned int port_map) -{ - struct mvpp2_prs_entry pe; - int tid_aux, tid; - int ret = 0; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_vlan_find(priv, tpid, ai); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - /* Get last double vlan tid */ - for (tid_aux = MVPP2_PE_LAST_FREE_TID; - tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == - MVPP2_PRS_RI_VLAN_DOUBLE) - break; - } - - if (tid <= tid_aux) - return -EINVAL; - - memset(&pe, 0, sizeof(pe)); - pe.index = tid; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - mvpp2_prs_match_etype(&pe, 0, tpid); - - /* VLAN tag detected, proceed with VID filtering */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - if (ai == MVPP2_PRS_SINGLE_VLAN_AI) { - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE, - MVPP2_PRS_RI_VLAN_MASK); - } else { - ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE, - MVPP2_PRS_RI_VLAN_MASK); - } - mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* Get first free double vlan ai number */ -static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv) -{ - int i; - - for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { - if (!priv->prs_double_vlans[i]) - return i; - } - - return -EINVAL; -} - -/* Search for existing double vlan entry */ -static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through the all entries with MVPP2_PRS_LU_VLAN */ - for (tid = MVPP2_PE_FIRST_FREE_TID; - tid <= MVPP2_PE_LAST_FREE_TID; tid++) { - unsigned int ri_mask; - bool match; - - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) && - mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2)); - - if 
(!match) - continue; - - ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK; - if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE) - return tid; - } - - return -ENOENT; -} - -/* Add or update double vlan entry */ -static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, - unsigned short tpid2, - unsigned int port_map) -{ - int tid_aux, tid, ai, ret = 0; - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); - - if (tid < 0) { - /* Create new tcam entry */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - /* Set ai value for new double vlan entry */ - ai = mvpp2_prs_double_vlan_ai_free_get(priv); - if (ai < 0) - return ai; - - /* Get first single/triple vlan tid */ - for (tid_aux = MVPP2_PE_FIRST_FREE_TID; - tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) { - unsigned int ri_bits; - - if (!priv->prs_shadow[tid_aux].valid || - priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); - ri_bits = mvpp2_prs_sram_ri_get(&pe); - ri_bits &= MVPP2_PRS_RI_VLAN_MASK; - if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || - ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE) - break; - } - - if (tid >= tid_aux) - return -ERANGE; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = tid; - - priv->prs_double_vlans[ai] = true; - - mvpp2_prs_match_etype(&pe, 0, tpid1); - mvpp2_prs_match_etype(&pe, 4, tpid2); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - /* Shift 4 bytes - skip outer vlan tag */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Update ports' mask */ - mvpp2_prs_tcam_port_map_set(&pe, port_map); - mvpp2_prs_hw_write(priv, &pe); - - return ret; -} - -/* IPv4 header parsing for fragmentation and L4 offset */ -static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_IGMP)) - return -EINVAL; - - /* Not fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK_L); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, - MVPP2_PRS_TCAM_PROTO_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 
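The two error paths above encode a TCAM ordering invariant: entries are matched in index order, so every double-VLAN entry must sit at a lower index than any single/triple entry. mvpp2_prs_vlan_add therefore rejects a new single/triple entry that would land at or below the last double entry (-EINVAL), and mvpp2_prs_double_vlan_add rejects a new double entry at or above the first single/triple one (-ERANGE). The same checks in isolation, as a sketch:

#include <errno.h>

/* new_tid: index allocated for the new entry; the boundary tid is the
 * last double entry (single/triple case) or the first single/triple
 * entry (double case), as found by the scans above.
 */
static int check_single_pos(int new_tid, int last_double_tid)
{
	return (new_tid <= last_double_tid) ? -EINVAL : 0;
}

static int check_double_pos(int new_tid, int first_single_tid)
{
	return (new_tid >= first_single_tid) ? -ERANGE : 0;
}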
MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Fragmented packet */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - - mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, - ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); - mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv4 L3 multicast or broadcast */ -static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int mask, tid; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = tid; - - switch (l3_cast) { - case MVPP2_PRS_L3_MULTI_CAST: - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, - MVPP2_PRS_IPV4_MC_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - case MVPP2_PRS_L3_BROAD_CAST: - mask = MVPP2_PRS_IPV4_BC_MASK; - mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); - mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - break; - default: - return -EINVAL; - } - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for protocols over IPv6 */ -static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, - unsigned int ri, unsigned int ri_mask) -{ - struct mvpp2_prs_entry pe; - int tid; - - if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && - (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 6, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Write HW */ - 
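The fragmented/non-fragmented split in mvpp2_prs_ip4_proto keys on the IPv4 flags/fragment-offset word (the header bytes matched at TCAM offsets 2-3 here): the first entry requires those bytes to be zero under the driver's masks, while the second leaves them unmasked and adds MVPP2_PRS_RI_IP_FRAG_TRUE to the result info. The equivalent software predicate, for reference (standard IPv4 bit layout, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* frag_word is the host-order 16-bit flags + fragment-offset field.
 * A packet is a fragment if "more fragments" is set or the offset is
 * non-zero, i.e. the word is non-zero apart from DF (bit 14).
 */
static bool ipv4_is_fragment(uint16_t frag_word)
{
	const uint16_t mf = 0x2000;		/* more-fragments flag */
	const uint16_t off_mask = 0x1fff;	/* 13-bit fragment offset */

	return (frag_word & (mf | off_mask)) != 0;
}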
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* IPv6 L3 multicast entry */ -static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) -{ - struct mvpp2_prs_entry pe; - int tid; - - if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) - return -EINVAL; - - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPv6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, - MVPP2_PRS_IPV6_MC_MASK); - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Parser per-port initialization */ -static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, - int lu_max, int offset) -{ - u32 val; - - /* Set lookup ID */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); - val &= ~MVPP2_PRS_PORT_LU_MASK(port); - val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); - mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); - - /* Set maximum number of loops for packet received from port */ - val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); - val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); - val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); - mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); - - /* Set initial offset for packet header extraction for the first - * searching loop - */ - val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); - val &= ~MVPP2_PRS_INIT_OFF_MASK(port); - val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); - mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); -} - -/* Default flow entries initialization for all ports */ -static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int port; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Set flow ID*/ - mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_hw_write(priv, &pe); - } -} - -/* Set default entry for Marvell Header field */ -static void mvpp2_prs_mh_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - pe.index = MVPP2_PE_MH_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); - mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); - mvpp2_prs_hw_write(priv, &pe); -} - 
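With mvpp2_prs_mh_init in place, the default lookup pipeline is fully chained: each entry's next-lookup field hands the packet to the following stage. A simplified summary, with the ordering inferred from the next_lu_set calls in this file (tagged/DSA traffic detours through the bracketed stages; untagged traffic goes straight to L2):

/* Approximate default stage order; names mirror the MVPP2_PRS_LU_* IDs.
 *
 *   MH -> MAC -> DSA -> [VLAN -> VID] -> L2 -> [PPPOE] -> IP4/IP6 -> FLOWS
 *
 * Bracketed stages are only visited for tagged or PPPoE traffic.
 */
static const char * const prs_stage_order[] = {
	"MH", "MAC", "DSA", "VLAN", "VID", "L2", "PPPOE", "IP4/IP6", "FLOWS",
};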
-/* Set default entries (placeholders) for promiscuous, non-promiscuous and - * multicast MAC addresses - */ -static void mvpp2_prs_mac_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Non-promiscuous mode for all ports - DROP unknown packets */ - pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - /* Create dummy entries for drop all and promiscuous modes */ - mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); -} - -/* Set default entries for various types of dsa packets */ -static void mvpp2_prs_dsa_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - /* Untagged EDSA entry - placeholder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_EDSA); - - /* Tagged EDSA entry - placeholder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* Untagged DSA entry - placeholder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, - MVPP2_PRS_DSA); - - /* Tagged DSA entry - placeholder */ - mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* Untagged EDSA ethertype entry - placeholder */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - - /* Tagged EDSA ethertype entry - placeholder */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - - /* Untagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - - /* Tagged DSA ethertype entry */ - mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - - /* Set default entry, in case no DSA or EDSA tag is found */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); - pe.index = MVPP2_PE_DSA_DEFAULT; - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); - - /* Shift 0 bytes */ - mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - - /* Clear all sram ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - mvpp2_prs_hw_write(priv, &pe); -} - -/* Initialize parser entries for VID filtering */ -static void mvpp2_prs_vid_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 4 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ -
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vid entry for extended DSA*/ - memset(&pe, 0, sizeof(pe)); - - /* Set default vid entry */ - pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, - MVPP2_PRS_EDSA_VID_AI_BIT); - - /* Skip VLAN header - Set offset to 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Match basic ethertypes */ -static int mvpp2_prs_etype_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Ethertype: PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); - - mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, - MVPP2_PRS_RI_PPPOE_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: ARP */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: LBTD */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - 
MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv4 with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Clear tcam data before updating */ - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; - pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD, - MVPP2_PRS_IPV4_HEAD_MASK); - - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Ethertype: IPv6 without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); - - /* Skip DIP of IPV6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + - MVPP2_MAX_L3_ADDR_SIZE, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - 
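A bookkeeping note for the shifts above: when the IPv4 ethertype matches, the parser advances MVPP2_ETH_TYPE_LEN + 4 bytes, i.e. it is already 4 bytes into the IP header when the IP4 stage runs. That is why the earlier mvpp2_prs_ip4_proto entries program the L4 offset as sizeof(struct iphdr) - 4: for an option-less header, L4 starts 20 - 4 = 16 bytes from the current position. The arithmetic spelled out (plain C, Linux's 20-byte struct iphdr assumed):

#include <netinet/ip.h>	/* struct iphdr: 20 bytes without options */

/* Distance from the parser's current position to L4, given that
 * `consumed` bytes of the IP header were already skipped (4 above).
 */
static int l4_off_from_here(unsigned int consumed)
{
	return (int)(sizeof(struct iphdr) - consumed);	/* 20 - 4 = 16 */
}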
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = false; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); - pe.index = MVPP2_PE_ETH_TYPE_UN; - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Generate flow in the next iteration*/ - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Set L3 offset even it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; - priv->prs_shadow[pe.index].finish = true; - mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Configure vlan entries and detect up to 2 successive VLAN tags. - * Possible options: - * 0x8100, 0x88A8 - * 0x8100, 0x8100 - * 0x8100 - * 0x88A8 - */ -static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - return -ENOMEM; - - /* Double VLAN: 0x8100, 0x88A8 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Double VLAN: 0x8100, 0x8100 */ - err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x88a8 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Single VLAN: 0x8100 */ - err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, - MVPP2_PRS_PORT_MASK); - if (err) - return err; - - /* Set default double vlan entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_DBL; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Clear ai for next iterations */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, - MVPP2_PRS_RI_VLAN_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, - MVPP2_PRS_DBL_VLAN_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - /* Set default vlan none entry */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); - pe.index = MVPP2_PE_VLAN_NONE; - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, - MVPP2_PRS_RI_VLAN_MASK); - - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, 
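mvpp2_prs_vlan_init installs exactly the four tag patterns its comment lists. The same set as data, for reference (0x8100 = ETH_P_8021Q, 0x88a8 = ETH_P_8021AD; outer TPID first, matching the tpid1/tpid2 arguments):

#include <stdint.h>

/* Outer/inner TPID combinations the parser is configured to detect;
 * inner == 0 means a single tag.
 */
static const struct { uint16_t outer, inner; } vlan_patterns[] = {
	{ 0x8100, 0x88a8 },	/* double tag */
	{ 0x8100, 0x8100 },	/* double tag (QinQ) */
	{ 0x8100, 0 },		/* single 802.1Q */
	{ 0x88a8, 0 },		/* single 802.1ad */
};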
MVPP2_PRS_LU_VLAN); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Set entries for PPPoE ethertype */ -static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* IPv4 over PPPoE with options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IP); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IP header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv4 over PPPoE without options */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, - MVPP2_PRS_IPV4_HEAD_MASK | - MVPP2_PRS_IPV4_IHL_MASK); - - /* Clear ri before updating */ - pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; - pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* IPv6 over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); - - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, - MVPP2_PRS_RI_L3_PROTO_MASK); - /* Skip eth_type + 4 bytes of IPv6 header */ - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L3 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - /* Non-IP over PPPoE */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); - pe.index = tid; - - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, - MVPP2_PRS_RI_L3_PROTO_MASK); - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - /* Set L3 offset even if it's unknown L3 */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, - MVPP2_ETH_TYPE_LEN, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv4 */ -static int mvpp2_prs_ip4_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int err; - - /* 
Set entries for TCP, UDP and IGMP over IPv4 */ - err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 Broadcast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); - if (err) - return err; - - /* IPv4 Multicast */ - err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Default IPv4 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_PROTO_UN; - - /* Set next lu to IPv4 */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); - mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - /* Set L4 offset */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct iphdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv4 entry for unicast address */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); - pe.index = MVPP2_PE_IP4_ADDR_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, - MVPP2_PRS_IPV4_DIP_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Initialize entries for IPv6 */ -static int mvpp2_prs_ip6_init(struct mvpp2 *priv) -{ - struct mvpp2_prs_entry pe; - int tid, err; - - /* Set entries for TCP, UDP and ICMP over IPv6 */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, - MVPP2_PRS_RI_L4_TCP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, - MVPP2_PRS_RI_L4_UDP, - MVPP2_PRS_RI_L4_PROTO_MASK); - if (err) - return err; - - err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, - MVPP2_PRS_RI_CPU_CODE_RX_SPEC | - MVPP2_PRS_RI_UDF3_RX_SPECIAL, - MVPP2_PRS_RI_CPU_CODE_MASK | - MVPP2_PRS_RI_UDF3_MASK); - if (err) - return err; - - /* IPv4 is the last header. 
This is similar case as 6-TCP or 17-UDP */ - /* Result Info: UDF7=1, DS lite */ - err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, - MVPP2_PRS_RI_UDF7_IP6_LITE, - MVPP2_PRS_RI_UDF7_MASK); - if (err) - return err; - - /* IPv6 multicast */ - err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); - if (err) - return err; - - /* Entry for checking hop limit */ - tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, - MVPP2_PE_LAST_FREE_TID); - if (tid < 0) - return tid; - - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = tid; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | - MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_L3_PROTO_MASK | - MVPP2_PRS_RI_DROP_MASK); - - mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown protocols */ - memset(&pe, 0, sizeof(pe)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - /* Set L4 offset relatively to our current place */ - mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, - sizeof(struct ipv6hdr) - 4, - MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unknown ext protocols */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; - - /* Finished: go to flowid generation */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, - MVPP2_PRS_RI_L4_PROTO_MASK); - - mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, - MVPP2_PRS_IPV6_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); - mvpp2_prs_hw_write(priv, &pe); - - /* Default IPv6 entry for unicast address */ - memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); - pe.index = MVPP2_PE_IP6_ADDR_UN; - - /* Finished: go to IPv6 again */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, - MVPP2_PRS_RI_L3_ADDR_MASK); - mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, - MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Shift back to IPV6 NH */ - mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); - /* Unmask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); - - /* Update shadow 
table and hw entry */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Find tcam entry with matched pair <vid,port> */ -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, - u16 mask) -{ - unsigned char byte[2], enable[2]; - struct mvpp2_prs_entry pe; - u16 rvid, rmask; - int tid; - - /* Go through all the entries with MVPP2_PRS_LU_VID */ - for (tid = MVPP2_PE_VID_FILT_RANGE_START; - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { - if (!priv->prs_shadow[tid].valid || - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); - mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); - - rvid = ((byte[0] & 0xf) << 8) + byte[1]; - rmask = ((enable[0] & 0xf) << 8) + enable[1]; - - if (rvid != vid || rmask != mask) - continue; - - return tid; - } - - return -ENOENT; -} - -/* Write parser entry for VID filtering */ -static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) -{ - unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + - port->id * MVPP2_PRS_VLAN_FILT_MAX; - unsigned int mask = 0xfff, reg_val, shift; - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this <vid,port> already exists */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - /* No such entry */ - if (tid < 0) { - - /* Go through all entries from first to last in vlan range */ - tid = mvpp2_prs_tcam_first_free(priv, vid_start, - vid_start + - MVPP2_PRS_VLAN_FILT_MAX_ENTRY); - - /* There isn't room for a new VID filter */ - if (tid < 0) - return tid; - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - /* Enable the current port */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Set match on VID */ - mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -/* Remove parser entry for VID filtering */ -static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) -{ - struct mvpp2 *priv = port->priv; - int tid; - - /* Scan TCAM and see if entry with this <vid,port> already exists */ - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); - - /* No such entry */ - if (tid < 0) - return; - - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; -} - -/* Remove all existing VID filters on this port */ -static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int tid; - - for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); - tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { - if (priv->prs_shadow[tid].valid) - mvpp2_prs_vid_entry_remove(port, tid); - } -} -
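mvpp2_prs_vid_range_find above reassembles the 12-bit VID from two TCAM data bytes: the low nibble of the first byte holds bits 11:8 and the second byte bits 7:0 (the VID's position inside the tag as the parser sees it). In isolation:

#include <stdint.h>

/* Mirror of the ((byte[0] & 0xf) << 8) + byte[1] computation above */
static uint16_t vid_from_tcam_bytes(uint8_t hi, uint8_t lo)
{
	return (uint16_t)(((hi & 0x0f) << 8) | lo);
}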
-/* Remove VID filtering entry for this port */ -static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - - /* Invalidate the guard entry */ - mvpp2_prs_hw_inv(priv, tid); - - priv->prs_shadow[tid].valid = false; -} - -/* Add guard entry that drops packets when no VID is matched on this port */ -static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) -{ - unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); - struct mvpp2 *priv = port->priv; - unsigned int reg_val, shift; - struct mvpp2_prs_entry pe; - - if (priv->prs_shadow[tid].valid) - return; - - memset(&pe, 0, sizeof(pe)); - - pe.index = tid; - - reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); - if (reg_val & MVPP2_DSA_EXTENDED) - shift = MVPP2_VLAN_TAG_EDSA_LEN; - else - shift = MVPP2_VLAN_TAG_LEN; - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port->id, true); - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); - - /* Skip VLAN header - Set offset to 4 or 8 bytes */ - mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Drop VLAN packets that don't belong to any VIDs on this port */ - mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, - MVPP2_PRS_RI_DROP_MASK); - - /* Clear all ai bits for next iteration */ - mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); - - /* Update shadow table */ - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); - mvpp2_prs_hw_write(priv, &pe); -} - -/* Parser default initialization */ -static int mvpp2_prs_default_init(struct platform_device *pdev, - struct mvpp2 *priv) -{ - int err, index, i; - - /* Enable tcam table */ - mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); - - /* Clear all tcam and sram entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); - - mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); - for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); - } - - /* Invalidate all tcam entries */ - for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) - mvpp2_prs_hw_inv(priv, index); - - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - - /* Always start from lookup = 0 */ - for (index = 0; index < MVPP2_MAX_PORTS; index++) - mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, - MVPP2_PRS_PORT_LU_MAX, 0); - - mvpp2_prs_def_flow_init(priv); - - mvpp2_prs_mh_init(priv); - - mvpp2_prs_mac_init(priv); - - mvpp2_prs_dsa_init(priv); - - mvpp2_prs_vid_init(priv); - - err = mvpp2_prs_etype_init(priv); - if (err) - return err; - - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; - - return 0; -} - -/* Compare MAC DA with tcam entry data */ -static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, - const u8 *da, unsigned char *mask) -{ - unsigned char tcam_byte, tcam_mask; - int index; - - for (index = 0; index < ETH_ALEN; index++) { -
mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); - if (tcam_mask != mask[index]) - return false; - - if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) - return false; - } - - return true; -} - -/* Find tcam entry with matched pair <MAC DA, port> */ -static int -mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, - unsigned char *mask, int udf_type) -{ - struct mvpp2_prs_entry pe; - int tid; - - /* Go through all the entries with MVPP2_PRS_LU_MAC */ - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned int entry_pmap; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != udf_type)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); - - if (mvpp2_prs_mac_range_equals(&pe, da, mask) && - entry_pmap == pmap) - return tid; - } - - return -ENOENT; -} - -/* Update parser's mac da entry */ -static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, - bool add) -{ - unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - struct mvpp2 *priv = port->priv; - unsigned int pmap, len, ri; - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - /* Scan TCAM and see if entry with this <MAC DA, port> already exists */ - tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, - MVPP2_PRS_UDF_MAC_DEF); - - /* No such entry */ - if (tid < 0) { - if (!add) - return 0; - - /* Create new TCAM entry */ - /* Go through all the entries from first to last */ - tid = mvpp2_prs_tcam_first_free(priv, - MVPP2_PE_MAC_RANGE_START, - MVPP2_PE_MAC_RANGE_END); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Mask all ports */ - mvpp2_prs_tcam_port_map_set(&pe, 0); - } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); - - /* Update port mask */ - mvpp2_prs_tcam_port_set(&pe, port->id, add); - - /* Invalidate the entry if no ports are left enabled */ - pmap = mvpp2_prs_tcam_port_map_get(&pe); - if (pmap == 0) { - if (add) - return -EINVAL; - - mvpp2_prs_hw_inv(priv, pe.index); - priv->prs_shadow[pe.index].valid = false; - return 0; - } - - /* Continue - set next lookup */ - mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); - - /* Set match on DA */ - len = ETH_ALEN; - while (len--) - mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); - - /* Set result info bits */ - if (is_broadcast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_BCAST; - } else if (is_multicast_ether_addr(da)) { - ri = MVPP2_PRS_RI_L2_MCAST; - } else { - ri = MVPP2_PRS_RI_L2_UCAST; - - if (ether_addr_equal(da, port->dev->dev_addr)) - ri |= MVPP2_PRS_RI_MAC_ME_MASK; - } - - mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | - MVPP2_PRS_RI_MAC_ME_MASK); - - /* Shift to ethertype */ - mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, - MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); - - /* Update shadow table and hw entry */ - priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; - mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); - mvpp2_prs_hw_write(priv, &pe); - - return 0; -} - -static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) -{ - struct mvpp2_port *port = netdev_priv(dev); - int err; - - /* Remove old parser entry */ - err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); - if (err) - return err; - -
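The result-info choice in mvpp2_prs_mac_da_accept depends only on the address class, plus a "MAC me" flag when the unicast address is the port's own. The same decision tree as a standalone sketch (is_broadcast_ether_addr / is_multicast_ether_addr / ether_addr_equal replaced by plain-C equivalents; the RI_* values here are symbolic):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum l2_cast { RI_L2_UCAST, RI_L2_MCAST, RI_L2_BCAST };

static enum l2_cast classify_da(const uint8_t da[6], const uint8_t own[6],
				bool *is_me)
{
	static const uint8_t bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	*is_me = false;
	if (!memcmp(da, bc, 6))
		return RI_L2_BCAST;	/* check broadcast first: it also has the group bit */
	if (da[0] & 1)
		return RI_L2_MCAST;	/* I/G bit set: group address */
	*is_me = !memcmp(da, own, 6);	/* unicast matching our own MAC */
	return RI_L2_UCAST;
}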
/* Add new parser entry */ - err = mvpp2_prs_mac_da_accept(port, da, true); - if (err) - return err; - - /* Set addr in the device */ - ether_addr_copy(dev->dev_addr, da); - - return 0; -} - -static void mvpp2_prs_mac_del_all(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - struct mvpp2_prs_entry pe; - unsigned long pmap; - int index, tid; - - for (tid = MVPP2_PE_MAC_RANGE_START; - tid <= MVPP2_PE_MAC_RANGE_END; tid++) { - unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; - - if (!priv->prs_shadow[tid].valid || - (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || - (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) - continue; - - mvpp2_prs_init_from_hw(priv, &pe, tid); - - pmap = mvpp2_prs_tcam_port_map_get(&pe); - - /* We only want entries active on this port */ - if (!test_bit(port->id, &pmap)) - continue; - - /* Read mac addr from entry */ - for (index = 0; index < ETH_ALEN; index++) - mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], - &da_mask[index]); - - /* Special cases: don't remove broadcast and port's own - * address - */ - if (is_broadcast_ether_addr(da) || - ether_addr_equal(da, port->dev->dev_addr)) - continue; - - /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); - } -} - -static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) -{ - switch (type) { - case MVPP2_TAG_TYPE_EDSA: - /* Add port to EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - /* Remove port from DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - break; - - case MVPP2_TAG_TYPE_DSA: - /* Add port to DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, true, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - /* Remove port from EDSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - case MVPP2_TAG_TYPE_MH: - case MVPP2_TAG_TYPE_NONE: - /* Remove port from EDSA and DSA entries */ - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); - mvpp2_prs_dsa_tag_set(priv, port, false, - MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); - break; - - default: - if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) - return -EINVAL; - } - - return 0; -} - -/* Set prs flow for the port */ -static int mvpp2_prs_def_flow(struct mvpp2_port *port) -{ - struct mvpp2_prs_entry pe; - int tid; - - memset(&pe, 0, sizeof(pe)); - - tid = mvpp2_prs_flow_find(port->priv, port->id); - - /* No such entry exists */ - if (tid < 0) { - /* Go through all the entries from last to first */ - tid = mvpp2_prs_tcam_first_free(port->priv, - MVPP2_PE_LAST_FREE_TID, - MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) - return tid; - - pe.index = tid; - - /* Set flow ID */ - mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); - mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); - - /* Update shadow table */ - mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); - } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); - } - - mvpp2_prs_tcam_lu_set(&pe,
MVPP2_PRS_LU_FLOWS); - mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); - mvpp2_prs_hw_write(port->priv, &pe); - - return 0; -} - -/* Classifier configuration routines */ - -/* Update classification flow table registers */ -static void mvpp2_cls_flow_write(struct mvpp2 *priv, - struct mvpp2_cls_flow_entry *fe) -{ - mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); - mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); -} - -/* Update classification lookup table register */ -static void mvpp2_cls_lookup_write(struct mvpp2 *priv, - struct mvpp2_cls_lookup_entry *le) -{ - u32 val; - - val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; - mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); - mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); -} - -/* Classifier default initialization */ -static void mvpp2_cls_init(struct mvpp2 *priv) -{ - struct mvpp2_cls_lookup_entry le; - struct mvpp2_cls_flow_entry fe; - int index; - - /* Enable classifier */ - mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); - - /* Clear classifier flow table */ - memset(&fe.data, 0, sizeof(fe.data)); - for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { - fe.index = index; - mvpp2_cls_flow_write(priv, &fe); - } - - /* Clear classifier lookup table */ - le.data = 0; - for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { - le.lkpid = index; - le.way = 0; - mvpp2_cls_lookup_write(priv, &le); - - le.way = 1; - mvpp2_cls_lookup_write(priv, &le); - } -} - -static void mvpp2_cls_port_config(struct mvpp2_port *port) -{ - struct mvpp2_cls_lookup_entry le; - u32 val; - - /* Set way for the port */ - val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); - val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); - - /* Pick the entry to be accessed in lookup ID decoding table - * according to the way and lkpid. 
- */ - le.lkpid = port->id; - le.way = 0; - le.data = 0; - - /* Set initial CPU queue for receiving packets */ - le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; - le.data |= port->first_rxq; - - /* Disable classification engines */ - le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; - - /* Update lookup ID table entry */ - mvpp2_cls_lookup_write(port->priv, &le); -} - -/* Set CPU queue number for oversize packets */ -static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) -{ - u32 val; - - mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), - port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); - - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), - (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); - - val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); - mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); -} - -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) -{ - if (likely(pool->frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(pool->frag_size); - else - return kmalloc(pool->frag_size, GFP_ATOMIC); -} - -static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) -{ - if (likely(pool->frag_size <= PAGE_SIZE)) - skb_free_frag(data); - else - kfree(data); -} - -/* Buffer Manager configuration routines */ - -/* Create pool */ -static int mvpp2_bm_pool_create(struct platform_device *pdev, - struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, int size) -{ - u32 val; - - /* Number of buffer pointers must be a multiple of 16, as per - * hardware constraints - */ - if (!IS_ALIGNED(size, 16)) - return -EINVAL; - - /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 - * bytes per buffer pointer - */ - if (priv->hw_version == MVPP21) - bm_pool->size_bytes = 2 * sizeof(u32) * size; - else - bm_pool->size_bytes = 2 * sizeof(u64) * size; - - bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, - &bm_pool->dma_addr, - GFP_KERNEL); - if (!bm_pool->virt_addr) - return -ENOMEM; - - if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, - MVPP2_BM_POOL_PTR_ALIGN)) { - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, - bm_pool->virt_addr, bm_pool->dma_addr); - dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", - bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); - return -ENOMEM; - } - - mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), - lower_32_bits(bm_pool->dma_addr)); - mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); - - val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); - val |= MVPP2_BM_START_MASK; - mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - - bm_pool->size = size; - bm_pool->pkt_size = 0; - bm_pool->buf_num = 0; - - return 0; -} - -/* Set pool buffer size */ -static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, - int buf_size) -{ - u32 val; - - bm_pool->buf_size = buf_size; - - val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); - mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); -} - -static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *dma_addr, - phys_addr_t *phys_addr) -{ - int cpu = get_cpu(); - - *dma_addr = mvpp2_percpu_read(priv, cpu, - MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); - - if (priv->hw_version == MVPP22) { - u32 val; - u32 dma_addr_highbits, phys_addr_highbits; - - val = mvpp2_percpu_read(priv, cpu, 
MVPP22_BM_ADDR_HIGH_ALLOC); - dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); - phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> - MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; - - if (sizeof(dma_addr_t) == 8) - *dma_addr |= (u64)dma_addr_highbits << 32; - - if (sizeof(phys_addr_t) == 8) - *phys_addr |= (u64)phys_addr_highbits << 32; - } - - put_cpu(); -} - -/* Free all buffers from the pool */ -static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool, int buf_num) -{ - int i; - - if (buf_num > bm_pool->buf_num) { - WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", - bm_pool->id, buf_num); - buf_num = bm_pool->buf_num; - } - - for (i = 0; i < buf_num; i++) { - dma_addr_t buf_dma_addr; - phys_addr_t buf_phys_addr; - void *data; - - mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, - &buf_dma_addr, &buf_phys_addr); - - dma_unmap_single(dev, buf_dma_addr, - bm_pool->buf_size, DMA_FROM_DEVICE); - - data = (void *)phys_to_virt(buf_phys_addr); - if (!data) - break; - - mvpp2_frag_free(bm_pool, data); - } - - /* Update BM driver with number of buffers removed from pool */ - bm_pool->buf_num -= i; -} - -/* Check number of buffers in BM pool */ -static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) -{ - int buf_num = 0; - - buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & - MVPP22_BM_POOL_PTRS_NUM_MASK; - buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & - MVPP2_BM_BPPI_PTR_NUM_MASK; - - /* HW has one buffer ready which is not reflected in the counters */ - if (buf_num) - buf_num += 1; - - return buf_num; -} - -/* Cleanup pool */ -static int mvpp2_bm_pool_destroy(struct platform_device *pdev, - struct mvpp2 *priv, - struct mvpp2_bm_pool *bm_pool) -{ - int buf_num; - u32 val; - - buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); - mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); - - /* Check buffer counters after free */ - buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); - if (buf_num) { - WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", - bm_pool->id, bm_pool->buf_num); - return 0; - } - - val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); - val |= MVPP2_BM_STOP_MASK; - mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - - dma_free_coherent(&pdev->dev, bm_pool->size_bytes, - bm_pool->virt_addr, - bm_pool->dma_addr); - return 0; -} - -static int mvpp2_bm_pools_init(struct platform_device *pdev, - struct mvpp2 *priv) -{ - int i, err, size; - struct mvpp2_bm_pool *bm_pool; - - /* Create all pools with maximum size */ - size = MVPP2_BM_POOL_SIZE_MAX; - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - bm_pool = &priv->bm_pools[i]; - bm_pool->id = i; - err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); - if (err) - goto err_unroll_pools; - mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); - } - return 0; - -err_unroll_pools: - dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); - for (i = i - 1; i >= 0; i--) - mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); - return err; -} - -static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - int i, err; - - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - /* Mask BM all interrupts */ - mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); - /* Clear BM cause register */ - mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); - } - - /* Allocate and initialize BM pools */ - priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, - 
sizeof(*priv->bm_pools), GFP_KERNEL); - if (!priv->bm_pools) - return -ENOMEM; - - err = mvpp2_bm_pools_init(pdev, priv); - if (err < 0) - return err; - return 0; -} - -static void mvpp2_setup_bm_pool(void) -{ - /* Short pool */ - mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; - mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; - - /* Long pool */ - mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; - mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; - - /* Jumbo pool */ - mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; - mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; -} - -/* Attach long pool to rxq */ -static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, - int lrxq, int long_pool) -{ - u32 val, mask; - int prxq; - - /* Get queue physical ID */ - prxq = port->rxqs[lrxq]->id; - - if (port->priv->hw_version == MVPP21) - mask = MVPP21_RXQ_POOL_LONG_MASK; - else - mask = MVPP22_RXQ_POOL_LONG_MASK; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~mask; - val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -/* Attach short pool to rxq */ -static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, - int lrxq, int short_pool) -{ - u32 val, mask; - int prxq; - - /* Get queue physical ID */ - prxq = port->rxqs[lrxq]->id; - - if (port->priv->hw_version == MVPP21) - mask = MVPP21_RXQ_POOL_SHORT_MASK; - else - mask = MVPP22_RXQ_POOL_SHORT_MASK; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~mask; - val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -static void *mvpp2_buf_alloc(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, - dma_addr_t *buf_dma_addr, - phys_addr_t *buf_phys_addr, - gfp_t gfp_mask) -{ - dma_addr_t dma_addr; - void *data; - - data = mvpp2_frag_alloc(bm_pool); - if (!data) - return NULL; - - dma_addr = dma_map_single(port->dev->dev.parent, data, - MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { - mvpp2_frag_free(bm_pool, data); - return NULL; - } - *buf_dma_addr = dma_addr; - *buf_phys_addr = virt_to_phys(data); - - return data; -} - -/* Release buffer to BM */ -static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, - dma_addr_t buf_dma_addr, - phys_addr_t buf_phys_addr) -{ - int cpu = get_cpu(); - - if (port->priv->hw_version == MVPP22) { - u32 val = 0; - - if (sizeof(dma_addr_t) == 8) - val |= upper_32_bits(buf_dma_addr) & - MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; - - if (sizeof(phys_addr_t) == 8) - val |= (upper_32_bits(buf_phys_addr) - << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & - MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; - - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP22_BM_ADDR_HIGH_RLS_REG, val); - } - - /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply - * returned in the "cookie" field of the RX - * descriptor. 
Instead of storing the virtual address, we - * store the physical address - */ - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, - MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); - - put_cpu(); -} - -/* Allocate buffers for the pool */ -static int mvpp2_bm_bufs_add(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, int buf_num) -{ - int i, buf_size, total_size; - dma_addr_t dma_addr; - phys_addr_t phys_addr; - void *buf; - - buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); - total_size = MVPP2_RX_TOTAL_SIZE(buf_size); - - if (buf_num < 0 || - (buf_num + bm_pool->buf_num > bm_pool->size)) { - netdev_err(port->dev, - "cannot allocate %d buffers for pool %d\n", - buf_num, bm_pool->id); - return 0; - } - - for (i = 0; i < buf_num; i++) { - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, - &phys_addr, GFP_KERNEL); - if (!buf) - break; - - mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, - phys_addr); - } - - /* Update BM driver with number of buffers added to pool */ - bm_pool->buf_num += i; - - netdev_dbg(port->dev, - "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", - bm_pool->id, bm_pool->pkt_size, buf_size, total_size); - - netdev_dbg(port->dev, - "pool %d: %d of %d buffers added\n", - bm_pool->id, i, buf_num); - return i; -} - -/* Notify the driver that the BM pool is being used as a specific type and return the - * pool pointer on success - */ -static struct mvpp2_bm_pool * -mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) -{ - struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; - int num; - - if (pool >= MVPP2_BM_POOLS_NUM) { - netdev_err(port->dev, "Invalid pool %d\n", pool); - return NULL; - } - - /* Allocate buffers in case BM pool is used as long pool, but packet - * size doesn't match MTU or BM pool hasn't been used yet - */ - if (new_pool->pkt_size == 0) { - int pkts_num; - - /* Set default buffer number or free all the buffers in case - * the pool is not empty - */ - pkts_num = new_pool->buf_num; - if (pkts_num == 0) - pkts_num = mvpp2_pools[pool].buf_num; - else - mvpp2_bm_bufs_free(port->dev->dev.parent, - port->priv, new_pool, pkts_num); - - new_pool->pkt_size = pkt_size; - new_pool->frag_size = - SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + - MVPP2_SKB_SHINFO_SIZE; - - /* Allocate buffers for this pool */ - num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); - if (num != pkts_num) { - WARN(1, "pool %d: %d of %d allocated\n", - new_pool->id, num, pkts_num); - return NULL; - } - } - - mvpp2_bm_pool_bufsize_set(port->priv, new_pool, - MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); - - return new_pool; -} - -/* Initialize pools for swf */ -static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) -{ - int rxq; - enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; - - /* If port pkt_size is higher than 1518B: - * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool - * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool - */ - if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { - long_log_pool = MVPP2_BM_JUMBO; - short_log_pool = MVPP2_BM_LONG; - } else { - long_log_pool = MVPP2_BM_LONG; - short_log_pool = MVPP2_BM_SHORT; - } - - if (!port->pool_long) { - port->pool_long = - mvpp2_bm_pool_use(port, long_log_pool, - mvpp2_pools[long_log_pool].pkt_size); - if (!port->pool_long) - return -ENOMEM; - - port->pool_long->port_map |= BIT(port->id); - - for (rxq = 0; rxq < port->nrxqs; rxq++) - mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); - } - - if 
(!port->pool_short) { - port->pool_short = - mvpp2_bm_pool_use(port, short_log_pool, - mvpp2_pools[short_log_pool].pkt_size); - if (!port->pool_short) - return -ENOMEM; - - port->pool_short->port_map |= BIT(port->id); - - for (rxq = 0; rxq < port->nrxqs; rxq++) - mvpp2_rxq_short_pool_set(port, rxq, - port->pool_short->id); - } - - return 0; -} - -static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) -{ - struct mvpp2_port *port = netdev_priv(dev); - enum mvpp2_bm_pool_log_num new_long_pool; - int pkt_size = MVPP2_RX_PKT_SIZE(mtu); - - /* If port MTU is higher than 1518B: - * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool - * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool - */ - if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) - new_long_pool = MVPP2_BM_JUMBO; - else - new_long_pool = MVPP2_BM_LONG; - - if (new_long_pool != port->pool_long->id) { - /* Remove port from old short & long pool */ - port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, - port->pool_long->pkt_size); - port->pool_long->port_map &= ~BIT(port->id); - port->pool_long = NULL; - - port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, - port->pool_short->pkt_size); - port->pool_short->port_map &= ~BIT(port->id); - port->pool_short = NULL; - - port->pkt_size = pkt_size; - - /* Add port to new short & long pool */ - mvpp2_swf_bm_pool_init(port); - - /* Update L4 checksum when jumbo enable/disable on port */ - if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - dev->hw_features &= ~(NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - } else { - dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - } - } - - dev->mtu = mtu; - dev->wanted_features = dev->features; - - netdev_update_features(dev); - return 0; -} - -static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) -{ - int i, sw_thread_mask = 0; - - for (i = 0; i < port->nqvecs; i++) - sw_thread_mask |= port->qvecs[i].sw_thread_mask; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); -} - -static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) -{ - int i, sw_thread_mask = 0; - - for (i = 0; i < port->nqvecs; i++) - sw_thread_mask |= port->qvecs[i].sw_thread_mask; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); -} - -static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) -{ - struct mvpp2_port *port = qvec->port; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); -} - -static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) -{ - struct mvpp2_port *port = qvec->port; - - mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); -} - -/* Mask the current CPU's Rx/Tx interrupts - * Called by on_each_cpu(), guaranteed to run with migration disabled, - * using smp_processor_id() is OK. - */ -static void mvpp2_interrupts_mask(void *arg) -{ - struct mvpp2_port *port = arg; - - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); -} - -/* Unmask the current CPU's Rx/Tx interrupts. - * Called by on_each_cpu(), guaranteed to run with migration disabled, - * using smp_processor_id() is OK. 
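/* Note: MVPP2_ISR_RX_TX_MASK_REG is banked per CPU, so masking or unmasking has to execute on every CPU rather than once; that is what the on_each_cpu() calling convention described for mvpp2_interrupts_mask() above (and mvpp2_interrupts_unmask() below) is for. A minimal usage sketch follows; the call shape is assumed for illustration and is not quoted from the driver's open/stop paths. */
static void mvpp2_mask_on_all_cpus(struct mvpp2_port *port)
{
	/* on_each_cpu() runs the callback on every online CPU with
	 * preemption disabled, so smp_processor_id() inside the callback
	 * is stable; the final argument (1) waits for all CPUs to finish.
	 */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
}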
- */ -static void mvpp2_interrupts_unmask(void *arg) -{ - struct mvpp2_port *port = arg; - u32 val; - - val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - if (port->has_tx_irqs) - val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), val); -} - -static void -mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) -{ - u32 val; - int i; - - if (port->priv->hw_version != MVPP22) - return; - - if (mask) - val = 0; - else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *v = port->qvecs + i; - - if (v->type != MVPP2_QUEUE_VECTOR_SHARED) - continue; - - mvpp2_percpu_write(port->priv, v->sw_thread_id, - MVPP2_ISR_RX_TX_MASK_REG(port->id), val); - } -} - -/* Port configuration routines */ - -static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); - if (port->gop_id == 2) - val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; - else if (port->gop_id == 3) - val |= GENCONF_CTRL0_PORT1_RGMII_MII; - regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); -} - -static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | - GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - if (port->gop_id > 1) { - regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); - if (port->gop_id == 2) - val &= ~GENCONF_CTRL0_PORT0_RGMII; - else if (port->gop_id == 3) - val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; - regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); - } -} - -static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); - void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); - u32 val; - - /* XPCS */ - val = readl(xpcs + MVPP22_XPCS_CFG0); - val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | - MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); - val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); - writel(val, xpcs + MVPP22_XPCS_CFG0); - - /* MPCS */ - val = readl(mpcs + MVPP22_MPCS_CTRL); - val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; - writel(val, mpcs + MVPP22_MPCS_CTRL); - - val = readl(mpcs + MVPP22_MPCS_CLK_RESET); - val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | - MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); - val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); - - val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; - val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); -} - -static int mvpp22_gop_init(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - - if (!priv->sysctrl_base) - return 0; - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - if (port->gop_id == 0) - goto invalid_conf; - mvpp22_gop_init_rgmii(port); - break; - case PHY_INTERFACE_MODE_SGMII: - mvpp22_gop_init_sgmii(port); - break; - case 
PHY_INTERFACE_MODE_10GKR: - if (port->gop_id != 0) - goto invalid_conf; - mvpp22_gop_init_10gkr(port); - break; - default: - goto unsupported_conf; - } - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); - val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | - GENCONF_PORT_CTRL1_EN(port->gop_id); - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); - - regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); - val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; - regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); - - regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); - val |= GENCONF_SOFT_RESET1_GOP; - regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); - -unsupported_conf: - return 0; - -invalid_conf: - netdev_err(port->dev, "Invalid port configuration\n"); - return -EINVAL; -} - -static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) -{ - u32 val; - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - /* Enable the GMAC link status irq for this port */ - val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); - val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); - } - - if (port->gop_id == 0) { - /* Enable the XLG/GIG irqs for this port */ - val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) - val |= MVPP22_XLG_EXT_INT_MASK_XLG; - else - val |= MVPP22_XLG_EXT_INT_MASK_GIG; - writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); - } -} - -static void mvpp22_gop_mask_irq(struct mvpp2_port *port) -{ - u32 val; - - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | - MVPP22_XLG_EXT_INT_MASK_GIG); - writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); - } - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); - val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); - } -} - -static void mvpp22_gop_setup_irq(struct mvpp2_port *port) -{ - u32 val; - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP22_GMAC_INT_MASK); - val |= MVPP22_GMAC_INT_MASK_LINK_STAT; - writel(val, port->base + MVPP22_GMAC_INT_MASK); - } - - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_INT_MASK); - val |= MVPP22_XLG_INT_MASK_LINK; - writel(val, port->base + MVPP22_XLG_INT_MASK); - } - - mvpp22_gop_unmask_irq(port); -} - -static int mvpp22_comphy_init(struct mvpp2_port *port) -{ - enum phy_mode mode; - int ret; - - if (!port->comphy) - return 0; - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_SGMII: - mode = PHY_MODE_SGMII; - break; - case PHY_INTERFACE_MODE_10GKR: - mode = PHY_MODE_10GKR; - break; - default: - return -EINVAL; - } - - ret = phy_set_mode(port->comphy, mode); - if (ret) - return ret; - - return phy_power_on(port->comphy); -} - -static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) -{ - u32 val; - - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { - val 
= readl(port->base + MVPP22_GMAC_CTRL_4_REG); - val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - val &= ~MVPP22_CTRL4_DP_CLK_SEL; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); - } - - /* The port is connected to a copper PHY */ - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~MVPP2_GMAC_PORT_TYPE_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | - MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | - MVPP2_GMAC_AN_DUPLEX_EN; - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) - val |= MVPP2_GMAC_IN_BAND_AUTONEG; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) -{ - u32 val; - - /* Force link down */ - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_PASS; - val |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - /* Set the GMAC in a reset state */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val |= MVPP2_GMAC_PORT_RESET_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - /* Configure the PCS and in-band AN */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; - } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { - val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; - } - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - mvpp2_port_mii_gmac_configure_mode(port); - - /* Unset the GMAC reset state */ - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - val &= ~MVPP2_GMAC_PORT_RESET_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - /* Stop forcing link down */ - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port) -{ - u32 val; - - if (port->gop_id != 0) - return; - - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - - val = readl(port->base + MVPP22_XLG_CTRL4_REG); - val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; - val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; - writel(val, port->base + MVPP22_XLG_CTRL4_REG); -} - -static void mvpp22_port_mii_set(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0) { - val = readl(port->base + MVPP22_XLG_CTRL3_REG); - val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR) - val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; - else - val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; - - writel(val, port->base + MVPP22_XLG_CTRL3_REG); - } -} - -static void mvpp2_port_mii_set(struct mvpp2_port *port) -{ - if (port->priv->hw_version == MVPP22) - mvpp22_port_mii_set(port); - - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) - mvpp2_port_mii_gmac_configure(port); - else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) - mvpp2_port_mii_xlg_configure(port); -} - -static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= 
MVPP2_GMAC_FC_ADV_EN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -static void mvpp2_port_enable(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val |= MVPP22_XLG_CTRL0_PORT_EN | - MVPP22_XLG_CTRL0_MAC_RESET_DIS; - val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - } else { - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val |= MVPP2_GMAC_PORT_EN_MASK; - val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - } -} - -static void mvpp2_port_disable(struct mvpp2_port *port) -{ - u32 val; - - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val &= ~(MVPP22_XLG_CTRL0_PORT_EN | - MVPP22_XLG_CTRL0_MAC_RESET_DIS); - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - } else { - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~(MVPP2_GMAC_PORT_EN_MASK); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); - } -} - -/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ -static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & - ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); -} - -/* Configure loopback port */ -static void mvpp2_port_loopback_set(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); - - if (port->speed == 1000) - val |= MVPP2_GMAC_GMII_LB_EN_MASK; - else - val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; - - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) - val |= MVPP2_GMAC_PCS_LB_EN_MASK; - else - val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; - - writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); -} - -struct mvpp2_ethtool_counter { - unsigned int offset; - const char string[ETH_GSTRING_LEN]; - bool reg_is_64b; -}; - -static u64 mvpp2_read_count(struct mvpp2_port *port, - const struct mvpp2_ethtool_counter *counter) -{ - u64 val; - - val = readl(port->stats_base + counter->offset); - if (counter->reg_is_64b) - val += (u64)readl(port->stats_base + counter->offset + 4) << 32; - - return val; -} - -/* Because software and hardware statistics are, by design, incremented at - * different moments in the chain of packet processing, incoming packets may - * well be dropped after being counted by hardware but before reaching the - * software statistics (most probably multicast packets); in the opposite - * direction, during transmission, FCS bytes are added in between, and TSO - * skbs are split with header bytes added. - * Hence, statistics gathered from userspace with ifconfig (software) and - * ethtool (hardware) cannot be compared. 
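/* Note: mvpp2_read_count() above widens a 64-bit MIB counter from two 32-bit reads (low word, then high word) with no torn-read guard. The conventional guarded idiom is sketched below for contrast; note it only suits sticky counters. The port-reset path later in this hunk reads the GOP statistics precisely to reset them, i.e. these registers clear on read, so re-reading them as in this sketch would itself consume counts -- which is presumably why the driver uses a single pass. Illustrative only, not driver code. */
static u64 read64_hi_lo_hi(void __iomem *base, unsigned int off)
{
	u32 hi, lo, hi2;

	do {
		hi  = readl(base + off + 4);
		lo  = readl(base + off);
		hi2 = readl(base + off + 4);
	} while (hi != hi2);	/* retry if a carry crossed between reads */

	return ((u64)hi << 32) | lo;
}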
- */ -static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { - { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, - { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, - { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, - { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, - { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, - { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, - { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, - { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, - { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, - { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, - { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, - { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, - { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, - { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, - { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, - { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, - { MVPP2_MIB_FC_SENT, "fc_sent" }, - { MVPP2_MIB_FC_RCVD, "fc_received" }, - { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, - { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, - { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, - { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, - { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, - { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, - { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, - { MVPP2_MIB_COLLISION, "collision" }, - { MVPP2_MIB_LATE_COLLISION, "late_collision" }, -}; - -static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, - u8 *data) -{ - if (sset == ETH_SS_STATS) { - int i; - - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - memcpy(data + i * ETH_GSTRING_LEN, - &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); - } -} - -static void mvpp2_gather_hw_statistics(struct work_struct *work) -{ - struct delayed_work *del_work = to_delayed_work(work); - struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, - stats_work); - u64 *pstats; - int i; - - mutex_lock(&port->gather_stats_lock); - - pstats = port->ethtool_stats; - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - - /* No need to read again the counters right after this function if it - * was called asynchronously by the user (ie. use of ethtool). - */ - cancel_delayed_work(&port->stats_work); - queue_delayed_work(port->priv->stats_queue, &port->stats_work, - MVPP2_MIB_COUNTERS_STATS_DELAY); - - mutex_unlock(&port->gather_stats_lock); -} - -static void mvpp2_ethtool_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct mvpp2_port *port = netdev_priv(dev); - - /* Update statistics for the given port, then take the lock to avoid - * concurrent accesses on the ethtool_stats structure during its copy. 
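/* Note: mvpp2_gather_hw_statistics() above is both the periodic worker and the on-demand path used by ethtool: cancelling and re-queueing the delayed work inside it restarts the period, so a user-triggered gather postpones the next scheduled one. The probe-time wiring this implies is presumably along the following lines; the field names match the usage above, but the function itself is an illustration, not the driver's probe code. */
static void mvpp2_port_stats_setup(struct mvpp2_port *port)
{
	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	/* first run after one full period; the work then re-arms itself */
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);
}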
- */ - mvpp2_gather_hw_statistics(&port->stats_work.work); - - mutex_lock(&port->gather_stats_lock); - memcpy(data, port->ethtool_stats, - sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); - mutex_unlock(&port->gather_stats_lock); -} - -static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) -{ - if (sset == ETH_SS_STATS) - return ARRAY_SIZE(mvpp2_ethtool_regs); - - return -EOPNOTSUPP; -} - -static void mvpp2_port_reset(struct mvpp2_port *port) -{ - u32 val; - unsigned int i; - - /* Read the GOP statistics to reset the hardware counters */ - for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) - mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - ~MVPP2_GMAC_PORT_RESET_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; -} - -/* Change maximum receive size of the port */ -static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; - val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << - MVPP2_GMAC_MAX_RX_SIZE_OFFS); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); -} - -/* Change maximum receive size of the port */ -static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) -{ - u32 val; - - val = readl(port->base + MVPP22_XLG_CTRL1_REG); - val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; - val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << - MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; - writel(val, port->base + MVPP22_XLG_CTRL1_REG); -} - -/* Set defaults to the MVPP2 port */ -static void mvpp2_defaults_set(struct mvpp2_port *port) -{ - int tx_port_num, val, queue, ptxq, lrxq; - - if (port->priv->hw_version == MVPP21) { - /* Configure port to loopback if needed */ - if (port->flags & MVPP2_F_LOOPBACK) - mvpp2_port_loopback_set(port); - - /* Update TX FIFO MIN Threshold */ - val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); - val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; - /* Min. 
TX threshold must be less than minimal packet length */ - val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); - writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); - } - - /* Disable Legacy WRR, Disable EJP, Release from reset */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, - tx_port_num); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); - - /* Close bandwidth for all queues */ - for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { - ptxq = mvpp2_txq_phys(port->id, queue); - mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); - } - - /* Set refill period to 1 usec, refill tokens - * and bucket size to maximum - */ - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, - port->priv->tclk / USEC_PER_SEC); - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); - val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; - val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); - val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); - val = MVPP2_TXP_TOKEN_SIZE_MAX; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); - - /* Set MaximumLowLatencyPacketSize value to 256 */ - mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), - MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | - MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); - - /* Enable Rx cache snoop */ - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val |= MVPP2_SNOOP_PKT_SIZE_MASK | - MVPP2_SNOOP_BUF_HDR_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } - - /* By default, mask all interrupts for all present CPUs */ - mvpp2_interrupts_disable(port); -} - -/* Enable/disable receiving packets */ -static void mvpp2_ingress_enable(struct mvpp2_port *port) -{ - u32 val; - int lrxq, queue; - - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val &= ~MVPP2_RXQ_DISABLE_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } -} - -static void mvpp2_ingress_disable(struct mvpp2_port *port) -{ - u32 val; - int lrxq, queue; - - for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { - queue = port->rxqs[lrxq]->id; - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); - val |= MVPP2_RXQ_DISABLE_MASK; - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); - } -} - -/* Enable transmit via physical egress queue - * - HW starts taking descriptors from DRAM - */ -static void mvpp2_egress_enable(struct mvpp2_port *port) -{ - u32 qmap; - int queue; - int tx_port_num = mvpp2_egress_port(port); - - /* Enable all initialized TXs. */
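/* Note: the refill period programmed into MVPP2_TXP_SCHED_PERIOD_REG above is tclk / USEC_PER_SEC, i.e. the number of core clock cycles in one microsecond, which is what "set refill period to 1 usec" means at the register level. Worked example (clock value illustrative, not a stated hardware default): with a 250 MHz tclk, 250000000 / 1000000 = 250 cycles per token-bucket refill period. */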
- qmap = 0; - for (queue = 0; queue < port->ntxqs; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - if (txq->descs) - qmap |= (1 << queue); - } - - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); -} - -/* Disable transmit via physical egress queue - * - HW doesn't take descriptors from DRAM - */ -static void mvpp2_egress_disable(struct mvpp2_port *port) -{ - u32 reg_data; - int delay; - int tx_port_num = mvpp2_egress_port(port); - - /* Issue stop command for active channels only */ - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & - MVPP2_TXP_SCHED_ENQ_MASK; - if (reg_data != 0) - mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, - (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); - - /* Wait for all Tx activity to terminate. */ - delay = 0; - do { - if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { - netdev_warn(port->dev, - "Tx stop timed out, status=0x%08x\n", - reg_data); - break; - } - mdelay(1); - delay++; - - /* Check port TX Command register that all - * Tx queues are stopped - */ - reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); - } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); -} - -/* Rx descriptors helper methods */ - -/* Get number of Rx descriptors occupied by received packets */ -static inline int -mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) -{ - u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); - - return val & MVPP2_RXQ_OCCUPIED_MASK; -} - -/* Update Rx queue status with the number of occupied and available - * Rx descriptor slots. - */ -static inline void -mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, - int used_count, int free_count) -{ - /* Decrement the number of used descriptors and increment the - * number of free descriptors. - */ - u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); - - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); -} - -/* Get pointer to next RX descriptor to be processed by SW */ -static inline struct mvpp2_rx_desc * -mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) -{ - int rx_desc = rxq->next_desc_to_proc; - - rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); - prefetch(rxq->descs + rxq->next_desc_to_proc); - return rxq->descs + rx_desc; -} - -/* Set rx queue offset */ -static void mvpp2_rxq_offset_set(struct mvpp2_port *port, - int prxq, int offset) -{ - u32 val; - - /* Convert offset from bytes to units of 32 bytes */ - offset = offset >> 5; - - val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); - val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; - - /* Offset is in units of 32 bytes */ - val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & - MVPP2_RXQ_PACKET_OFFSET_MASK); - - mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); -} - -/* Tx descriptors helper methods */ - -/* Get pointer to next Tx descriptor to be processed (sent) by HW */ -static struct mvpp2_tx_desc * -mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) -{ - int tx_desc = txq->next_desc_to_proc; - - txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); - return txq->descs + tx_desc; -} - -/* Update HW with number of aggregated Tx descriptors to be sent - * - * Called only from mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. - */
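/* Note: the "aggregated" TXQ handled below is a per-CPU staging ring (one per CPU, indexed by smp_processor_id()), so descriptors are queued without locking and the hardware demultiplexes each one to its real TXQ from a field inside the descriptor itself. The ring-advance macro used by the next_desc_get helpers above, MVPP2_QUEUE_NEXT_DESC(), is defined earlier in the file; from the way it pairs with ->last_desc it is presumably the plain wrap-around increment sketched here (illustrative name): */
#define MVPP2_QUEUE_NEXT_DESC_SKETCH(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)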
-static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) -{ - /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_AGGR_TXQ_UPDATE_REG, pending); -} - - -/* Check if there are enough free descriptors in aggregated txq. - * If not, update the number of occupied descriptors and repeat the check. - * - * Called only from mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. - */ -static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, - struct mvpp2_tx_queue *aggr_txq, int num) -{ - if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { - /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); - - aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; - } - - if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) - return -ENOMEM; - - return 0; -} - -/* Reserved Tx descriptors allocation request - * - * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called - * only by mvpp2_tx(), so migration is disabled, using - * smp_processor_id() is OK. - */ -static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, - struct mvpp2_tx_queue *txq, int num) -{ - u32 val; - int cpu = smp_processor_id(); - - val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); - - val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); - - return val & MVPP2_TXQ_RSVD_RSLT_MASK; -} - -/* Check if there are enough reserved descriptors for transmission. - * If not, request chunk of reserved descriptors and check again. - */ -static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, - struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int num) -{ - int req, cpu, desc_count; - - if (txq_pcpu->reserved_num >= num) - return 0; - - /* Not enough descriptors reserved! Update the reserved descriptor - * count and check again. - */ - - desc_count = 0; - /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { - struct mvpp2_txq_pcpu *txq_pcpu_aux; - - txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); - desc_count += txq_pcpu_aux->count; - desc_count += txq_pcpu_aux->reserved_num; - } - - req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); - desc_count += req; - - if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) - return -ENOMEM; - - txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); - - /* OK, the descriptor count has been updated: check again. */ - if (txq_pcpu->reserved_num < num) - return -ENOMEM; - return 0; -} - -/* Release the last allocated Tx descriptor. Useful to handle DMA - * mapping failures in the Tx path. 
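/* Note: a worked example for the global limit in mvpp2_txq_reserved_desc_num_proc() above (numbers illustrative, not driver defaults): with txq->size = 2048, four present CPUs and MVPP2_CPU_DESC_CHUNK = 64, a reservation is refused once the sum of every CPU's used + reserved descriptors plus the new request would exceed 2048 - 4 * 64 = 1792. The slack of one chunk per CPU guarantees that no single CPU can starve the others of descriptors through its reservations. */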
- */ -static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) -{ - if (txq->next_desc_to_proc == 0) - txq->next_desc_to_proc = txq->last_desc - 1; - else - txq->next_desc_to_proc--; -} - -/* Set Tx descriptors fields relevant for CSUM calculation */ -static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, - int ip_hdr_len, int l4_proto) -{ - u32 command; - - /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, - * G_L4_chk, L4_type required only for checksum calculation - */ - command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); - command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); - command |= MVPP2_TXD_IP_CSUM_DISABLE; - - if (l3_proto == swab16(ETH_P_IP)) { - command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ - command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ - } else { - command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ - } - - if (l4_proto == IPPROTO_TCP) { - command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ - command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ - } else if (l4_proto == IPPROTO_UDP) { - command |= MVPP2_TXD_L4_UDP; /* enable UDP */ - command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ - } else { - command |= MVPP2_TXD_L4_CSUM_NOT; - } - - return command; -} - -/* Get number of sent descriptors and decrement counter. - * The number of sent descriptors is returned. - * Per-CPU access - * - * Called only from mvpp2_txq_done(), called from mvpp2_tx() - * (migration disabled) and from the TX completion tasklet (migration - * disabled) so using smp_processor_id() is OK. - */ -static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - u32 val; - - /* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), - MVPP2_TXQ_SENT_REG(txq->id)); - - return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> - MVPP2_TRANSMITTED_COUNT_OFFSET; -} - -/* Called through on_each_cpu(), so runs on all CPUs, with migration - * disabled, therefore using smp_processor_id() is OK. 
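/* Note: MVPP2_TXQ_SENT_REG() is read-to-clear, as the "Reading status reg resets transmitted descriptor counter" comment above states: a single read both fetches and zeroes the count, so two back-to-back reads for the same queue return N and then 0. That is also why mvpp2_txq_sent_counter_clear() just below reads the register and discards the result -- the read is performed purely for its clearing side effect. */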
- */ -static void mvpp2_txq_sent_counter_clear(void *arg) -{ - struct mvpp2_port *port = arg; - int queue; - - for (queue = 0; queue < port->ntxqs; queue++) { - int id = port->txqs[queue]->id; - - mvpp2_percpu_read(port->priv, smp_processor_id(), - MVPP2_TXQ_SENT_REG(id)); - } -} - -/* Set max sizes for Tx queues */ -static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) -{ - u32 val, size, mtu; - int txq, tx_port_num; - - mtu = port->pkt_size * 8; - if (mtu > MVPP2_TXP_MTU_MAX) - mtu = MVPP2_TXP_MTU_MAX; - - /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ - mtu = 3 * mtu; - - /* Indirect access to registers */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - - /* Set MTU */ - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); - val &= ~MVPP2_TXP_MTU_MAX; - val |= mtu; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); - - /* TXP token size and all TXQs token size must be larger than MTU */ - val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); - size = val & MVPP2_TXP_TOKEN_SIZE_MAX; - if (size < mtu) { - size = mtu; - val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; - val |= size; - mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); - } - - for (txq = 0; txq < port->ntxqs; txq++) { - val = mvpp2_read(port->priv, - MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); - size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; - - if (size < mtu) { - size = mtu; - val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; - val |= size; - mvpp2_write(port->priv, - MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), - val); - } - } -} - -/* Set the number of packets that will be received before Rx interrupt - * will be generated by HW. - */ -static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int cpu = get_cpu(); - - if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) - rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; - - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, - rxq->pkts_coal); - - put_cpu(); -} - -/* For some reason in the LSP this is done on each CPU. Why? */ -static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - int cpu = get_cpu(); - u32 val; - - if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) - txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; - - val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); - - put_cpu(); -} - -static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) -{ - u64 tmp = (u64)clk_hz * usec; - - do_div(tmp, USEC_PER_SEC); - - return tmp > U32_MAX ? U32_MAX : tmp; -} - -static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) -{ - u64 tmp = (u64)cycles * USEC_PER_SEC; - - do_div(tmp, clk_hz); - - return tmp > U32_MAX ? 
U32_MAX : tmp; -} - -/* Set the time delay in usec before Rx interrupt */ -static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - unsigned long freq = port->priv->tclk; - u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); - - if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { - rxq->time_coal = - mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); - - /* re-evaluate to get actual register value */ - val = mvpp2_usec_to_cycles(rxq->time_coal, freq); - } - - mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); -} - -static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) -{ - unsigned long freq = port->priv->tclk; - u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); - - if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { - port->tx_time_coal = - mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); - - /* re-evaluate to get actual register value */ - val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); - } - - mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); -} - -/* Free Tx queue skbuffs */ -static void mvpp2_txq_bufs_free(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu, int num) -{ - int i; - - for (i = 0; i < num; i++) { - struct mvpp2_txq_pcpu_buf *tx_buf = - txq_pcpu->buffs + txq_pcpu->txq_get_index; - - if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) - dma_unmap_single(port->dev->dev.parent, tx_buf->dma, - tx_buf->size, DMA_TO_DEVICE); - if (tx_buf->skb) - dev_kfree_skb_any(tx_buf->skb); - - mvpp2_txq_inc_get(txq_pcpu); - } -} - -static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, - u32 cause) -{ - int queue = fls(cause) - 1; - - return port->rxqs[queue]; -} - -static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, - u32 cause) -{ - int queue = fls(cause) - 1; - - return port->txqs[queue]; -} - -/* Handle end of transmission */ -static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, - struct mvpp2_txq_pcpu *txq_pcpu) -{ - struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); - int tx_done; - - if (txq_pcpu->cpu != smp_processor_id()) - netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); - - tx_done = mvpp2_txq_sent_desc_proc(port, txq); - if (!tx_done) - return; - mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); - - txq_pcpu->count -= tx_done; - - if (netif_tx_queue_stopped(nq)) - if (txq_pcpu->count <= txq_pcpu->wake_threshold) - netif_tx_wake_queue(nq); -} - -static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) -{ - struct mvpp2_tx_queue *txq; - struct mvpp2_txq_pcpu *txq_pcpu; - unsigned int tx_todo = 0; - - while (cause) { - txq = mvpp2_get_tx_queue(port, cause); - if (!txq) - break; - - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - - if (txq_pcpu->count) { - mvpp2_txq_done(port, txq, txq_pcpu); - tx_todo += txq_pcpu->count; - } - - cause &= ~(1 << txq->log_id); - } - return tx_todo; -} - -/* Rx/Tx queue initialization/cleanup methods */ - -/* Allocate and initialize descriptors for aggr TXQ */ -static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) -{ - u32 txq_dma; - - /* Allocate memory for TX descriptors */ - aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, - &aggr_txq->descs_dma, GFP_KERNEL); - if (!aggr_txq->descs) - return -ENOMEM; - - aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; - - /* Aggr TXQ no reset WA */ - 
aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); - - /* Set Tx descriptors queue starting address indirect - * access - */ - if (priv->hw_version == MVPP21) - txq_dma = aggr_txq->descs_dma; - else - txq_dma = aggr_txq->descs_dma >> - MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; - - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), - MVPP2_AGGR_TXQ_SIZE); - - return 0; -} - -/* Create a specified Rx queue */ -static int mvpp2_rxq_init(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) - -{ - u32 rxq_dma; - int cpu; - - rxq->size = port->rx_ring_size; - - /* Allocate memory for RX descriptors */ - rxq->descs = dma_alloc_coherent(port->dev->dev.parent, - rxq->size * MVPP2_DESC_ALIGNED_SIZE, - &rxq->descs_dma, GFP_KERNEL); - if (!rxq->descs) - return -ENOMEM; - - rxq->last_desc = rxq->size - 1; - - /* Zero occupied and non-occupied counters - direct access */ - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - - /* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - if (port->priv->hw_version == MVPP21) - rxq_dma = rxq->descs_dma; - else - rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); - put_cpu(); - - /* Set Offset */ - mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); - - /* Set coalescing pkts and time */ - mvpp2_rx_pkts_coal_set(port, rxq); - mvpp2_rx_time_coal_set(port, rxq); - - /* Add number of descriptors ready for receiving packets */ - mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); - - return 0; -} - -/* Push packets received by the RXQ to BM pool */ -static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int rx_received, i; - - rx_received = mvpp2_rxq_received(port, rxq->id); - if (!rx_received) - return; - - for (i = 0; i < rx_received; i++) { - struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 status = mvpp2_rxdesc_status_get(port, rx_desc); - int pool; - - pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - - mvpp2_bm_pool_put(port, pool, - mvpp2_rxdesc_dma_addr_get(port, rx_desc), - mvpp2_rxdesc_cookie_get(port, rx_desc)); - } - mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); -} - -/* Cleanup Rx queue */ -static void mvpp2_rxq_deinit(struct mvpp2_port *port, - struct mvpp2_rx_queue *rxq) -{ - int cpu; - - mvpp2_rxq_drop_pkts(port, rxq); - - if (rxq->descs) - dma_free_coherent(port->dev->dev.parent, - rxq->size * MVPP2_DESC_ALIGNED_SIZE, - rxq->descs, - rxq->descs_dma); - - rxq->descs = NULL; - rxq->last_desc = 0; - rxq->next_desc_to_proc = 0; - rxq->descs_dma = 0; - - /* Clear Rx descriptors queue starting address and size; - * free descriptor number - */ - mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); - put_cpu(); -} - -/* Create and initialize a Tx queue */ -static int mvpp2_txq_init(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - u32 val; - int cpu, desc, desc_per_txq, tx_port_num; - struct mvpp2_txq_pcpu *txq_pcpu; - - 
txq->size = port->tx_ring_size; - - /* Allocate memory for Tx descriptors */ - txq->descs = dma_alloc_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - &txq->descs_dma, GFP_KERNEL); - if (!txq->descs) - return -ENOMEM; - - txq->last_desc = txq->size - 1; - - /* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, - txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, - txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, - txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); - val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); - - /* Calculate base address in prefetch buffer. We reserve 16 descriptors - * for each existing TXQ. - * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT - * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS - */ - desc_per_txq = 16; - desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + - (txq->log_id * desc_per_txq); - - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, - MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | - MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); - put_cpu(); - - /* WRR / EJP configuration - indirect access */ - tx_port_num = mvpp2_egress_port(port); - mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); - - val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); - val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; - val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); - val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); - - val = MVPP2_TXQ_TOKEN_SIZE_MAX; - mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), - val); - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->size = txq->size; - txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, - sizeof(*txq_pcpu->buffs), - GFP_KERNEL); - if (!txq_pcpu->buffs) - return -ENOMEM; - - txq_pcpu->count = 0; - txq_pcpu->reserved_num = 0; - txq_pcpu->txq_put_index = 0; - txq_pcpu->txq_get_index = 0; - txq_pcpu->tso_headers = NULL; - - txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; - txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; - - txq_pcpu->tso_headers = - dma_alloc_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - &txq_pcpu->tso_headers_dma, - GFP_KERNEL); - if (!txq_pcpu->tso_headers) - return -ENOMEM; - } - - return 0; -} - -/* Free allocated TXQ resources */ -static void mvpp2_txq_deinit(struct mvpp2_port *port, - struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - kfree(txq_pcpu->buffs); - - if (txq_pcpu->tso_headers) - dma_free_coherent(port->dev->dev.parent, - txq_pcpu->size * TSO_HEADER_SIZE, - txq_pcpu->tso_headers, - txq_pcpu->tso_headers_dma); - - txq_pcpu->tso_headers = NULL; - } - - if (txq->descs) - dma_free_coherent(port->dev->dev.parent, - txq->size * MVPP2_DESC_ALIGNED_SIZE, - txq->descs, txq->descs_dma); - - txq->descs = NULL; - txq->last_desc = 0; - txq->next_desc_to_proc = 0; - txq->descs_dma = 0; - - /* Set minimum bandwidth for disabled TXQs */ - mvpp2_write(port->priv, 
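
mvpp2_txq_init above gives each CPU two watermarks: transmission stops once count reaches stop_threshold (ring size minus the worst-case descriptors one skb can take) and, in mvpp2_txq_done further up, only resumes when completions drain the queue to wake_threshold, half of that. The gap is deliberate hysteresis, so a nearly full queue does not flap between stopped and awake on every packet. A toy model with made-up sizes:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE     512
#define MAX_SKB_DESCS 100                   /* illustrative worst case */
#define STOP_THRESH   (RING_SIZE - MAX_SKB_DESCS)
#define WAKE_THRESH   (STOP_THRESH / 2)

int main(void)
{
    int count = 0;
    bool stopped = false;

    for (int i = 0; i < 800; i++) {
        if (!stopped) {
            count += 2;                     /* enqueue ~2 descriptors */
            if (count >= STOP_THRESH) {
                stopped = true;             /* netif_tx_stop_queue() */
                printf("stopped at count=%d\n", count);
            }
        }
        if (count > 0)
            count--;                        /* completion drains one */
        if (stopped && count <= WAKE_THRESH) {
            stopped = false;                /* netif_tx_wake_queue() */
            printf("woken at count=%d\n", count);
        }
    }
    return 0;
}
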
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); - - /* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); - put_cpu(); -} - -/* Cleanup Tx ports */ -static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; - u32 val; - - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); - val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); - - /* The napi queue has been stopped so wait for all packets - * to be transmitted. - */ - delay = 0; - do { - if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { - netdev_warn(port->dev, - "port %d: cleaning queue %d timed out\n", - port->id, txq->log_id); - break; - } - mdelay(1); - delay++; - - pending = mvpp2_percpu_read(port->priv, cpu, - MVPP2_TXQ_PENDING_REG); - pending &= MVPP2_TXQ_PENDING_MASK; - } while (pending); - - val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); - put_cpu(); - - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - - /* Release all packets */ - mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); - - /* Reset queue */ - txq_pcpu->count = 0; - txq_pcpu->txq_put_index = 0; - txq_pcpu->txq_get_index = 0; - } -} - -/* Cleanup all Tx queues */ -static void mvpp2_cleanup_txqs(struct mvpp2_port *port) -{ - struct mvpp2_tx_queue *txq; - int queue; - u32 val; - - val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); - - /* Reset Tx ports and delete Tx queues */ - val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); - mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); - - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - mvpp2_txq_clean(port, txq); - mvpp2_txq_deinit(port, txq); - } - - on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); - - val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); - mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); -} - -/* Cleanup all Rx queues */ -static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) -{ - int queue; - - for (queue = 0; queue < port->nrxqs; queue++) - mvpp2_rxq_deinit(port, port->rxqs[queue]); -} - -/* Init all Rx queues for port */ -static int mvpp2_setup_rxqs(struct mvpp2_port *port) -{ - int queue, err; - - for (queue = 0; queue < port->nrxqs; queue++) { - err = mvpp2_rxq_init(port, port->rxqs[queue]); - if (err) - goto err_cleanup; - } - return 0; - -err_cleanup: - mvpp2_cleanup_rxqs(port); - return err; -} - -/* Init all tx queues for port */ -static int mvpp2_setup_txqs(struct mvpp2_port *port) -{ - struct mvpp2_tx_queue *txq; - int queue, err; - - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - err = mvpp2_txq_init(port, txq); - if (err) - goto err_cleanup; - } - - if (port->has_tx_irqs) { - mvpp2_tx_time_coal_set(port); - for (queue = 0; queue < port->ntxqs; queue++) { - txq = port->txqs[queue]; - mvpp2_tx_pkts_coal_set(port, txq); - } - } - - on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); - return 0; - -err_cleanup: - mvpp2_cleanup_txqs(port); - return err; -} - -/* The callback for per-port interrupt */ -static irqreturn_t mvpp2_isr(int irq, void *dev_id) -{ - struct mvpp2_queue_vector *qv = dev_id; - - 
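
mvpp2_txq_clean above is a bounded busy-wait: set the drain bit, poll the pending-descriptor count once per millisecond, and give up with a warning instead of hanging if the queue never empties. The same shape with the register read stubbed out:

#include <stdio.h>

#define TIMEOUT_MSEC 1000

static int read_pending(void)      /* stands in for the PENDING register */
{
    static int left = 5;
    return left ? left-- : 0;
}

int main(void)
{
    int delay = 0, pending;

    do {
        if (delay >= TIMEOUT_MSEC) {
            fprintf(stderr, "drain timed out\n");
            break;
        }
        /* mdelay(1) in the driver */
        delay++;
        pending = read_pending();
    } while (pending);

    if (!pending)
        printf("drained after %d ms\n", delay);
    return 0;
}
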
mvpp2_qvec_interrupt_disable(qv); - - napi_schedule(&qv->napi); - - return IRQ_HANDLED; -} - -/* Per-port interrupt for link status changes */ -static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) -{ - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; - struct net_device *dev = port->dev; - bool event = false, link = false; - u32 val; - - mvpp22_gop_mask_irq(port); - - if (port->gop_id == 0 && - port->phy_interface == PHY_INTERFACE_MODE_10GKR) { - val = readl(port->base + MVPP22_XLG_INT_STAT); - if (val & MVPP22_XLG_INT_STAT_LINK) { - event = true; - val = readl(port->base + MVPP22_XLG_STATUS); - if (val & MVPP22_XLG_STATUS_LINK_UP) - link = true; - } - } else if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP22_GMAC_INT_STAT); - if (val & MVPP22_GMAC_INT_STAT_LINK) { - event = true; - val = readl(port->base + MVPP2_GMAC_STATUS0); - if (val & MVPP2_GMAC_STATUS0_LINK_UP) - link = true; - } - } - - if (!netif_running(dev) || !event) - goto handled; - - if (link) { - mvpp2_interrupts_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } else { - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - mvpp2_ingress_disable(port); - mvpp2_egress_disable(port); - - mvpp2_interrupts_disable(port); - } - -handled: - mvpp22_gop_unmask_irq(port); - return IRQ_HANDLED; -} - -static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port, - struct phy_device *phydev) -{ - u32 val; - - if (port->phy_interface != PHY_INTERFACE_MODE_RGMII && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID && - port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID && - port->phy_interface != PHY_INTERFACE_MODE_SGMII) - return; - - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX | - MVPP2_GMAC_AN_SPEED_EN | - MVPP2_GMAC_AN_DUPLEX_EN); - - if (phydev->duplex) - val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - - if (phydev->speed == SPEED_1000) - val |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVPP2_GMAC_CONFIG_MII_SPEED; - - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); -} - -/* Adjust link */ -static void mvpp2_link_event(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct phy_device *phydev = dev->phydev; - bool link_reconfigured = false; - u32 val; - - if (phydev->link) { - if (port->phy_interface != phydev->interface && port->comphy) { - /* disable current port for reconfiguration */ - mvpp2_interrupts_disable(port); - netif_carrier_off(port->dev); - mvpp2_port_disable(port); - phy_power_off(port->comphy); - - /* comphy reconfiguration */ - port->phy_interface = phydev->interface; - mvpp22_comphy_init(port); - - /* gop/mac reconfiguration */ - mvpp22_gop_init(port); - mvpp2_port_mii_set(port); - - link_reconfigured = true; - } - - if ((port->speed != phydev->speed) || - (port->duplex != phydev->duplex)) { - mvpp2_gmac_set_autoneg(port, phydev); - - port->duplex = phydev->duplex; - port->speed = phydev->speed; - } - } - - if (phydev->link != port->link || link_reconfigured) { - port->link = phydev->link; - - if (phydev->link) { - if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || - port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || - 
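
mvpp2_link_status_isr above keeps the GoP interrupt masked for its whole body: mask, read the latched XLG or GMAC status to learn the new link state, act only when the interface is running, and unmask on the single exit path. Reduced to a skeleton with stubbed accessors:

#include <stdbool.h>

static void mask_irq(void) { }                     /* gop_mask_irq()   */
static void unmask_irq(void) { }                   /* gop_unmask_irq() */
static bool read_latched_link_up(void) { return true; }
static bool netif_is_running(void) { return true; }
static void apply_link_state(bool up) { (void)up; }

static int link_isr(void)
{
    bool up;

    mask_irq();
    up = read_latched_link_up();
    if (netif_is_running())
        apply_link_state(up);      /* carrier, queues, ingress/egress */
    unmask_irq();                  /* single exit: always unmask */
    return 1;                      /* IRQ_HANDLED */
}

int main(void) { return link_isr() ? 0 : 1; }
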
port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || - port->phy_interface == PHY_INTERFACE_MODE_SGMII) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val |= (MVPP2_GMAC_FORCE_LINK_PASS | - MVPP2_GMAC_FORCE_LINK_DOWN); - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - } - - mvpp2_interrupts_enable(port); - mvpp2_port_enable(port); - - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - netif_carrier_on(dev); - netif_tx_wake_all_queues(dev); - } else { - port->duplex = -1; - port->speed = 0; - - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - mvpp2_ingress_disable(port); - mvpp2_egress_disable(port); - - mvpp2_port_disable(port); - mvpp2_interrupts_disable(port); - } - - phy_print_status(phydev); - } -} - -static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) -{ - ktime_t interval; - - if (!port_pcpu->timer_scheduled) { - port_pcpu->timer_scheduled = true; - interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; - hrtimer_start(&port_pcpu->tx_done_timer, interval, - HRTIMER_MODE_REL_PINNED); - } -} - -static void mvpp2_tx_proc_cb(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); - unsigned int tx_todo, cause; - - if (!netif_running(dev)) - return; - port_pcpu->timer_scheduled = false; - - /* Process all the Tx queues */ - cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); - - /* Set the timer in case not all the packets were processed */ - if (tx_todo) - mvpp2_timer_set(port_pcpu); -} - -static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) -{ - struct mvpp2_port_pcpu *port_pcpu = container_of(timer, - struct mvpp2_port_pcpu, - tx_done_timer); - - tasklet_schedule(&port_pcpu->tx_done_tasklet); - - return HRTIMER_NORESTART; -} - -/* Main RX/TX processing routines */ - -/* Display more error info */ -static void mvpp2_rx_error(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - u32 status = mvpp2_rxdesc_status_get(port, rx_desc); - size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); - - switch (status & MVPP2_RXD_ERR_CODE_MASK) { - case MVPP2_RXD_ERR_CRC: - netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", - status, sz); - break; - case MVPP2_RXD_ERR_OVERRUN: - netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", - status, sz); - break; - case MVPP2_RXD_ERR_RESOURCE: - netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", - status, sz); - break; - } -} - -/* Handle RX checksum offload */ -static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, - struct sk_buff *skb) -{ - if (((status & MVPP2_RXD_L3_IP4) && - !(status & MVPP2_RXD_IP4_HEADER_ERR)) || - (status & MVPP2_RXD_L3_IP6)) - if (((status & MVPP2_RXD_L4_UDP) || - (status & MVPP2_RXD_L4_TCP)) && - (status & MVPP2_RXD_L4_CSUM_OK)) { - skb->csum = 0; - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - - skb->ip_summed = CHECKSUM_NONE; -} - -/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ -static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, int pool) -{ - dma_addr_t dma_addr; - phys_addr_t phys_addr; - void *buf; - - /* No recycle or too many buffers are in use, so allocate a new skb */ - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, - GFP_ATOMIC); - if (!buf) - return -ENOMEM; - - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); - - return 0; -} - -/* Handle tx 
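
mvpp2_rx_csum above reports CHECKSUM_UNNECESSARY only when the descriptor shows a clean L3 header (IPv4 without header error, or IPv6) and a hardware-verified TCP or UDP checksum; everything else falls back to CHECKSUM_NONE for software verification. The same decision as a standalone predicate; the bit values below are illustrative, not the driver's real masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RXD_L3_IP4         (1u << 0)   /* illustrative bit layout */
#define RXD_IP4_HEADER_ERR (1u << 1)
#define RXD_L3_IP6         (1u << 2)
#define RXD_L4_TCP         (1u << 3)
#define RXD_L4_UDP         (1u << 4)
#define RXD_L4_CSUM_OK     (1u << 5)

static bool hw_csum_ok(uint32_t status)
{
    bool l3_ok = ((status & RXD_L3_IP4) && !(status & RXD_IP4_HEADER_ERR)) ||
                 (status & RXD_L3_IP6);
    bool l4_ok = (status & (RXD_L4_TCP | RXD_L4_UDP)) &&
                 (status & RXD_L4_CSUM_OK);

    return l3_ok && l4_ok;     /* true -> CHECKSUM_UNNECESSARY */
}

int main(void)
{
    printf("%d\n", hw_csum_ok(RXD_L3_IP4 | RXD_L4_TCP | RXD_L4_CSUM_OK));
    printf("%d\n", hw_csum_ok(RXD_L3_IP4 | RXD_IP4_HEADER_ERR |
                              RXD_L4_TCP | RXD_L4_CSUM_OK));
    return 0;
}
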
checksum */ -static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) -{ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - int ip_hdr_len = 0; - u8 l4_proto; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *ip4h = ip_hdr(skb); - - /* Calculate IPv4 checksum and L4 checksum */ - ip_hdr_len = ip4h->ihl; - l4_proto = ip4h->protocol; - } else if (skb->protocol == htons(ETH_P_IPV6)) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); - - /* Read l4_protocol from one of IPv6 extra headers */ - if (skb_network_header_len(skb) > 0) - ip_hdr_len = (skb_network_header_len(skb) >> 2); - l4_proto = ip6h->nexthdr; - } else { - return MVPP2_TXD_L4_CSUM_NOT; - } - - return mvpp2_txq_desc_csum(skb_network_offset(skb), - skb->protocol, ip_hdr_len, l4_proto); - } - - return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; -} - -/* Main rx processing */ -static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, - int rx_todo, struct mvpp2_rx_queue *rxq) -{ - struct net_device *dev = port->dev; - int rx_received; - int rx_done = 0; - u32 rcvd_pkts = 0; - u32 rcvd_bytes = 0; - - /* Get number of received packets and clamp the to-do */ - rx_received = mvpp2_rxq_received(port, rxq->id); - if (rx_todo > rx_received) - rx_todo = rx_received; - - while (rx_done < rx_todo) { - struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - struct mvpp2_bm_pool *bm_pool; - struct sk_buff *skb; - unsigned int frag_size; - dma_addr_t dma_addr; - phys_addr_t phys_addr; - u32 rx_status; - int pool, rx_bytes, err; - void *data; - - rx_done++; - rx_status = mvpp2_rxdesc_status_get(port, rx_desc); - rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); - rx_bytes -= MVPP2_MH_SIZE; - dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); - phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); - data = (void *)phys_to_virt(phys_addr); - - pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - bm_pool = &port->priv->bm_pools[pool]; - - /* In case of an error, release the requested buffer pointer - * to the Buffer Manager. This request process is controlled - * by the hardware, and the information about the buffer is - * comprised by the RX descriptor. 
- */ - if (rx_status & MVPP2_RXD_ERR_SUMMARY) { -err_drop_frame: - dev->stats.rx_errors++; - mvpp2_rx_error(port, rx_desc); - /* Return the buffer to the pool */ - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); - continue; - } - - if (bm_pool->frag_size > PAGE_SIZE) - frag_size = 0; - else - frag_size = bm_pool->frag_size; - - skb = build_skb(data, frag_size); - if (!skb) { - netdev_warn(port->dev, "skb build failed\n"); - goto err_drop_frame; - } - - err = mvpp2_rx_refill(port, bm_pool, pool); - if (err) { - netdev_err(port->dev, "failed to refill BM pools\n"); - goto err_drop_frame; - } - - dma_unmap_single(dev->dev.parent, dma_addr, - bm_pool->buf_size, DMA_FROM_DEVICE); - - rcvd_pkts++; - rcvd_bytes += rx_bytes; - - skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); - skb_put(skb, rx_bytes); - skb->protocol = eth_type_trans(skb, dev); - mvpp2_rx_csum(port, rx_status, skb); - - napi_gro_receive(napi, skb); - } - - if (rcvd_pkts) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); - - u64_stats_update_begin(&stats->syncp); - stats->rx_packets += rcvd_pkts; - stats->rx_bytes += rcvd_bytes; - u64_stats_update_end(&stats->syncp); - } - - /* Update Rx queue management counters */ - wmb(); - mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); - - return rx_todo; -} - -static inline void -tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, - struct mvpp2_tx_desc *desc) -{ - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - - dma_addr_t buf_dma_addr = - mvpp2_txdesc_dma_addr_get(port, desc); - size_t buf_sz = - mvpp2_txdesc_size_get(port, desc); - if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) - dma_unmap_single(port->dev->dev.parent, buf_dma_addr, - buf_sz, DMA_TO_DEVICE); - mvpp2_txq_desc_put(txq); -} - -/* Handle tx fragmentation processing */ -static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_tx_queue *txq) -{ - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); - struct mvpp2_tx_desc *tx_desc; - int i; - dma_addr_t buf_dma_addr; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - void *addr = page_address(frag->page.p) + frag->page_offset; - - tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, frag->size); - - buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, - frag->size, - DMA_TO_DEVICE); - if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { - mvpp2_txq_desc_put(txq); - goto cleanup; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - if (i == (skb_shinfo(skb)->nr_frags - 1)) { - /* Last descriptor */ - mvpp2_txdesc_cmd_set(port, tx_desc, - MVPP2_TXD_L_DESC); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - } else { - /* Descriptor in the middle: Not First, Not Last */ - mvpp2_txdesc_cmd_set(port, tx_desc, 0); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - } - } - - return 0; -cleanup: - /* Release all descriptors that were used to map fragments of - * this packet, as well as the corresponding DMA mappings - */ - for (i = i - 1; i >= 0; i--) { - tx_desc = txq->descs + i; - tx_desc_unmap_put(port, txq, tx_desc); - } - - return -ENOMEM; -} - -static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, - struct net_device *dev, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int hdr_sz) -{ - struct mvpp2_port *port = 
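
mvpp2_tx_frag_process above is the usual map-or-unwind loop: take a descriptor and DMA-map one fragment at a time, and on the first mapping failure walk back over everything already taken, releasing in reverse order so the aggregated ring is left exactly as it was found. Its control flow, with the mapping and release steps stubbed:

#include <errno.h>
#include <stdio.h>

/* Stubs for mvpp2_txq_next_desc_get() + dma_map_single() and for
 * tx_desc_unmap_put(); here the sixth mapping "fails". */
static int take_desc_and_map(int i) { return i < 5 ? 0 : -1; }
static void unmap_and_put_desc(int i) { printf("undo frag %d\n", i); }

static int map_frags(int nr_frags)
{
    int i;

    for (i = 0; i < nr_frags; i++)
        if (take_desc_and_map(i) < 0)
            goto cleanup;
    return 0;

cleanup:
    while (--i >= 0)               /* release in reverse order */
        unmap_and_put_desc(i);
    return -ENOMEM;
}

int main(void)
{
    map_frags(8);                  /* watch the unwind fire */
    return 0;
}
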
netdev_priv(dev); - struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - dma_addr_t addr; - - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); - - addr = txq_pcpu->tso_headers_dma + - txq_pcpu->txq_put_index * TSO_HEADER_SIZE; - mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); - - mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | - MVPP2_TXD_F_DESC | - MVPP2_TXD_PADDING_DISABLE); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); -} - -static inline int mvpp2_tso_put_data(struct sk_buff *skb, - struct net_device *dev, struct tso_t *tso, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu, - int sz, bool left, bool last) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - dma_addr_t buf_dma_addr; - - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, sz); - - buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { - mvpp2_txq_desc_put(txq); - return -ENOMEM; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - if (!left) { - mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); - if (last) { - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - return 0; - } - } else { - mvpp2_txdesc_cmd_set(port, tx_desc, 0); - } - - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - return 0; -} - -static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, - struct mvpp2_tx_queue *txq, - struct mvpp2_tx_queue *aggr_txq, - struct mvpp2_txq_pcpu *txq_pcpu) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct tso_t tso; - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); - int i, len, descs = 0; - - /* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, - tso_count_descs(skb))) - return 0; - - tso_start(skb, &tso); - len = skb->len - hdr_sz; - while (len > 0) { - int left = min_t(int, skb_shinfo(skb)->gso_size, len); - char *hdr = txq_pcpu->tso_headers + - txq_pcpu->txq_put_index * TSO_HEADER_SIZE; - - len -= left; - descs++; - - tso_build_hdr(skb, hdr, &tso, left, len == 0); - mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); - - while (left > 0) { - int sz = min_t(int, tso.size, left); - left -= sz; - descs++; - - if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, - txq_pcpu, sz, left, len == 0)) - goto release; - tso_build_data(skb, &tso, sz); - } - } - - return descs; - -release: - for (i = descs - 1; i >= 0; i--) { - struct mvpp2_tx_desc *tx_desc = txq->descs + i; - tx_desc_unmap_put(port, txq, tx_desc); - } - return 0; -} - -/* Main tx processing */ -static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_tx_queue *txq, *aggr_txq; - struct mvpp2_txq_pcpu *txq_pcpu; - struct mvpp2_tx_desc *tx_desc; - dma_addr_t buf_dma_addr; - int frags = 0; - u16 txq_id; - u32 tx_cmd; - - txq_id = skb_get_queue_mapping(skb); - txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; - - if (skb_is_gso(skb)) { - frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); - goto out; - } - frags = skb_shinfo(skb)->nr_frags + 1; - - /* Check number of available descriptors */ - if 
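
mvpp2_tx_tso above never copies headers at transmit time: tso_build_hdr() writes each segment's header straight into the per-CPU tso_headers coherent buffer (one TSO_HEADER_SIZE slot per put index), that slot is emitted as the segment's first descriptor, and the payload follows in data descriptors. The descriptor arithmetic, simplified to one data descriptor per segment:

#include <stdio.h>

int main(void)
{
    int payload = 64000;    /* skb->len - hdr_sz         */
    int mss = 1448;         /* skb_shinfo(skb)->gso_size */
    int descs = 0, segs = 0;

    while (payload > 0) {
        int seg = payload < mss ? payload : mss;

        payload -= seg;
        segs++;
        descs++;            /* one header descriptor per segment */
        descs++;            /* at least one data descriptor; pieces
                             * crossing page bounds would add more */
    }
    printf("%d segments, >=%d descriptors\n", segs, descs);
    return 0;
}
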
(mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { - frags = 0; - goto out; - } - - /* Get a descriptor for the first part of the packet */ - tx_desc = mvpp2_txq_next_desc_get(aggr_txq); - mvpp2_txdesc_txq_set(port, tx_desc, txq->id); - mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); - - buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { - mvpp2_txq_desc_put(txq); - frags = 0; - goto out; - } - - mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); - - tx_cmd = mvpp2_skb_tx_csum(port, skb); - - if (frags == 1) { - /* First and Last descriptor */ - tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; - mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); - } else { - /* First but not Last */ - tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; - mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); - - /* Continue with other skb fragments */ - if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { - tx_desc_unmap_put(port, txq, tx_desc); - frags = 0; - } - } - -out: - if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); - struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - - txq_pcpu->reserved_num -= frags; - txq_pcpu->count += frags; - aggr_txq->count += frags; - - /* Enable transmit */ - wmb(); - mvpp2_aggr_txq_pend_desc_add(port, frags); - - if (txq_pcpu->count >= txq_pcpu->stop_threshold) - netif_tx_stop_queue(nq); - - u64_stats_update_begin(&stats->syncp); - stats->tx_packets++; - stats->tx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - } else { - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - } - - /* Finalize TX processing */ - if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) - mvpp2_txq_done(port, txq, txq_pcpu); - - /* Set the timer in case not all frags were processed */ - if (!port->has_tx_irqs && txq_pcpu->count <= frags && - txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); - - mvpp2_timer_set(port_pcpu); - } - - return NETDEV_TX_OK; -} - -static inline void mvpp2_cause_error(struct net_device *dev, int cause) -{ - if (cause & MVPP2_CAUSE_FCS_ERR_MASK) - netdev_err(dev, "FCS error\n"); - if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) - netdev_err(dev, "rx fifo overrun error\n"); - if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) - netdev_err(dev, "tx fifo underrun error\n"); -} - -static int mvpp2_poll(struct napi_struct *napi, int budget) -{ - u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; - int rx_done = 0; - struct mvpp2_port *port = netdev_priv(napi->dev); - struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); - - qv = container_of(napi, struct mvpp2_queue_vector, napi); - - /* Rx/Tx cause register - * - * Bits 0-15: each bit indicates received packets on the Rx queue - * (bit 0 is for Rx queue 0). - * - * Bits 16-23: each bit indicates transmitted packets on the Tx queue - * (bit 16 is for Tx queue 0). 
- * - * Each CPU has its own Rx/Tx cause register - */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - - cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; - if (cause_misc) { - mvpp2_cause_error(port->dev, cause_misc); - - /* Clear the cause register */ - mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, - MVPP2_ISR_RX_TX_CAUSE_REG(port->id), - cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); - } - - cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); - } - - /* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; - cause_rx <<= qv->first_rxq; - cause_rx |= qv->pending_cause_rx; - while (cause_rx && budget > 0) { - int count; - struct mvpp2_rx_queue *rxq; - - rxq = mvpp2_get_rx_queue(port, cause_rx); - if (!rxq) - break; - - count = mvpp2_rx(port, napi, budget, rxq); - rx_done += count; - budget -= count; - if (budget > 0) { - /* Clear the bit associated to this Rx queue - * so that next iteration will continue from - * the next Rx queue. - */ - cause_rx &= ~(1 << rxq->logic_rxq); - } - } - - if (budget > 0) { - cause_rx = 0; - napi_complete_done(napi, rx_done); - - mvpp2_qvec_interrupt_enable(qv); - } - qv->pending_cause_rx = cause_rx; - return rx_done; -} - -/* Set hw internals when starting port */ -static void mvpp2_start_dev(struct mvpp2_port *port) -{ - struct net_device *ndev = port->dev; - int i; - - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) - mvpp2_xlg_max_rx_size_set(port); - else - mvpp2_gmac_max_rx_size_set(port); - - mvpp2_txp_max_tx_size_set(port); - - for (i = 0; i < port->nqvecs; i++) - napi_enable(&port->qvecs[i].napi); - - /* Enable interrupts on all CPUs */ - mvpp2_interrupts_enable(port); - - if (port->priv->hw_version == MVPP22) { - mvpp22_comphy_init(port); - mvpp22_gop_init(port); - } - - mvpp2_port_mii_set(port); - mvpp2_port_enable(port); - if (ndev->phydev) - phy_start(ndev->phydev); - netif_tx_start_all_queues(port->dev); -} - -/* Set hw internals when stopping port */ -static void mvpp2_stop_dev(struct mvpp2_port *port) -{ - struct net_device *ndev = port->dev; - int i; - - /* Stop new packets from arriving to RXQs */ - mvpp2_ingress_disable(port); - - mdelay(10); - - /* Disable interrupts on all CPUs */ - mvpp2_interrupts_disable(port); - - for (i = 0; i < port->nqvecs; i++) - napi_disable(&port->qvecs[i].napi); - - netif_carrier_off(port->dev); - netif_tx_stop_all_queues(port->dev); - - mvpp2_egress_disable(port); - mvpp2_port_disable(port); - if (ndev->phydev) - phy_stop(ndev->phydev); - phy_power_off(port->comphy); -} - -static int mvpp2_check_ringparam_valid(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - u16 new_rx_pending = ring->rx_pending; - u16 new_tx_pending = ring->tx_pending; - - if (ring->rx_pending == 0 || ring->tx_pending == 0) - return -EINVAL; - - if (ring->rx_pending > MVPP2_MAX_RXD_MAX) - new_rx_pending = MVPP2_MAX_RXD_MAX; - else if (!IS_ALIGNED(ring->rx_pending, 16)) - new_rx_pending = ALIGN(ring->rx_pending, 16); - - if (ring->tx_pending > MVPP2_MAX_TXD_MAX) - new_tx_pending = MVPP2_MAX_TXD_MAX; - else if (!IS_ALIGNED(ring->tx_pending, 32)) - new_tx_pending = ALIGN(ring->tx_pending, 32); - - /* The Tx ring size cannot be smaller than the minimum number of - * descriptors 
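
mvpp2_poll above demultiplexes a single per-thread cause word: the low bits flag Rx queues with work, a higher byte flags Tx queues with completed descriptors, and a summary bit reports misc errors. Decoding both bitmaps highest-bit-first, like mvpp2_get_rx_queue's fls(cause) - 1, with illustrative mask values:

#include <stdint.h>
#include <stdio.h>

#define CAUSE_RXQ_MASK  0x0000ffffu   /* bits 0-15: Rx queues  (illustrative) */
#define CAUSE_TXQ_MASK  0x00ff0000u   /* bits 16-23: Tx queues (illustrative) */
#define CAUSE_TXQ_SHIFT 16

int main(void)
{
    uint32_t cause = 0x00050003u;     /* sample: TxQ 2+0, RxQ 1+0 */
    uint32_t rx = cause & CAUSE_RXQ_MASK;
    uint32_t tx = (cause & CAUSE_TXQ_MASK) >> CAUSE_TXQ_SHIFT;

    while (tx) {
        int q = 31 - __builtin_clz(tx);   /* fls() - 1, GCC/Clang builtin */
        printf("tx done on queue %d\n", q);
        tx &= ~(1u << q);
    }
    while (rx) {
        int q = 31 - __builtin_clz(rx);
        printf("rx work on queue %d\n", q);
        rx &= ~(1u << q);
    }
    return 0;
}
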
needed for TSO. - */ - if (new_tx_pending < MVPP2_MAX_SKB_DESCS) - new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); - - if (ring->rx_pending != new_rx_pending) { - netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", - ring->rx_pending, new_rx_pending); - ring->rx_pending = new_rx_pending; - } - - if (ring->tx_pending != new_tx_pending) { - netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", - ring->tx_pending, new_tx_pending); - ring->tx_pending = new_tx_pending; - } - - return 0; -} - -static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) -{ - u32 mac_addr_l, mac_addr_m, mac_addr_h; - - mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); - mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); - mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); - addr[0] = (mac_addr_h >> 24) & 0xFF; - addr[1] = (mac_addr_h >> 16) & 0xFF; - addr[2] = (mac_addr_h >> 8) & 0xFF; - addr[3] = mac_addr_h & 0xFF; - addr[4] = mac_addr_m & 0xFF; - addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; -} - -static int mvpp2_phy_connect(struct mvpp2_port *port) -{ - struct phy_device *phy_dev; - - /* No PHY is attached */ - if (!port->phy_node) - return 0; - - phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0, - port->phy_interface); - if (!phy_dev) { - netdev_err(port->dev, "cannot connect to phy\n"); - return -ENODEV; - } - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->advertising = phy_dev->supported; - - port->link = 0; - port->duplex = 0; - port->speed = 0; - - return 0; -} - -static void mvpp2_phy_disconnect(struct mvpp2_port *port) -{ - struct net_device *ndev = port->dev; - - if (!ndev->phydev) - return; - - phy_disconnect(ndev->phydev); -} - -static int mvpp2_irqs_init(struct mvpp2_port *port) -{ - int err, i; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); - - err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); - if (err) - goto err; - - if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); - } - - return 0; -err: - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - irq_set_affinity_hint(qv->irq, NULL); - free_irq(qv->irq, qv); - } - - return err; -} - -static void mvpp2_irqs_deinit(struct mvpp2_port *port) -{ - int i; - - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - irq_set_affinity_hint(qv->irq, NULL); - irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); - free_irq(qv->irq, qv); - } -} - -static void mvpp22_init_rss(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - int i; - - /* Set the table width: replace the whole classifier Rx queue number - * with the ones configured in RSS table entries. - */ - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); - mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); - - /* Loop through the classifier Rx Queues and map them to a RSS table. - * Map them all to the first table (0) by default. - */ - for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { - mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); - mvpp2_write(priv, MVPP22_RSS_TABLE, - MVPP22_RSS_TABLE_POINTER(0)); - } - - /* Configure the first table to evenly distribute the packets across - * real Rx Queues. The table entries map a hash to an port Rx Queue. 
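
mvpp2_check_ringparam_valid above fails only on zero; every other request is adjusted instead: clamped to the hardware maxima, Rx rounded up to a multiple of 16, Tx to a multiple of 32, and Tx lifted to cover at least one worst-case TSO skb. The arithmetic alone, with an assumed value standing in for MVPP2_MAX_SKB_DESCS:

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    unsigned int rx = 1000, tx = 50;     /* user request */
    const unsigned int min_tx = 117;     /* assumed MAX_SKB_DESCS */

    rx = ALIGN_UP(rx, 16);               /* -> 1008 */
    tx = ALIGN_UP(tx, 32);               /* -> 64   */
    if (tx < min_tx)
        tx = ALIGN_UP(min_tx, 32);       /* -> 128  */

    printf("rx=%u tx=%u\n", rx, tx);
    return 0;
}
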
- */ - for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { - u32 sel = MVPP22_RSS_INDEX_TABLE(0) | - MVPP22_RSS_INDEX_TABLE_ENTRY(i); - mvpp2_write(priv, MVPP22_RSS_INDEX, sel); - - mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); - } - -} - -static int mvpp2_open(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2 *priv = port->priv; - unsigned char mac_bcast[ETH_ALEN] = { - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - int err; - - err = mvpp2_prs_mac_da_accept(port, mac_bcast, true); - if (err) { - netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); - return err; - } - err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); - if (err) { - netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); - return err; - } - err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); - if (err) { - netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); - return err; - } - err = mvpp2_prs_def_flow(port); - if (err) { - netdev_err(dev, "mvpp2_prs_def_flow failed\n"); - return err; - } - - /* Allocate the Rx/Tx queues */ - err = mvpp2_setup_rxqs(port); - if (err) { - netdev_err(port->dev, "cannot allocate Rx queues\n"); - return err; - } - - err = mvpp2_setup_txqs(port); - if (err) { - netdev_err(port->dev, "cannot allocate Tx queues\n"); - goto err_cleanup_rxqs; - } - - err = mvpp2_irqs_init(port); - if (err) { - netdev_err(port->dev, "cannot init IRQs\n"); - goto err_cleanup_txqs; - } - - if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) { - err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, - dev->name, port); - if (err) { - netdev_err(port->dev, "cannot request link IRQ %d\n", - port->link_irq); - goto err_free_irq; - } - - mvpp22_gop_setup_irq(port); - } - - /* In default link is down */ - netif_carrier_off(port->dev); - - err = mvpp2_phy_connect(port); - if (err < 0) - goto err_free_link_irq; - - /* Unmask interrupts on all CPUs */ - on_each_cpu(mvpp2_interrupts_unmask, port, 1); - mvpp2_shared_interrupt_mask_unmask(port, false); - - mvpp2_start_dev(port); - - if (priv->hw_version == MVPP22) - mvpp22_init_rss(port); - - /* Start hardware statistics gathering */ - queue_delayed_work(priv->stats_queue, &port->stats_work, - MVPP2_MIB_COUNTERS_STATS_DELAY); - - return 0; - -err_free_link_irq: - if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) - free_irq(port->link_irq, port); -err_free_irq: - mvpp2_irqs_deinit(port); -err_cleanup_txqs: - mvpp2_cleanup_txqs(port); -err_cleanup_rxqs: - mvpp2_cleanup_rxqs(port); - return err; -} - -static int mvpp2_stop(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu; - struct mvpp2 *priv = port->priv; - int cpu; - - mvpp2_stop_dev(port); - mvpp2_phy_disconnect(port); - - /* Mask interrupts on all CPUs */ - on_each_cpu(mvpp2_interrupts_mask, port, 1); - mvpp2_shared_interrupt_mask_unmask(port, true); - - if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) - free_irq(port->link_irq, port); - - mvpp2_irqs_deinit(port); - if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); - - hrtimer_cancel(&port_pcpu->tx_done_timer); - port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); - } - } - mvpp2_cleanup_rxqs(port); - mvpp2_cleanup_txqs(port); - - cancel_delayed_work_sync(&port->stats_work); - - return 0; -} - -static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, - struct netdev_hw_addr_list *list) -{ - struct 
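
mvpp22_init_rss above points all classifier Rx queues at RSS table 0 and fills that table so entry i selects real queue i % nrxqs; a uniform hash then spreads flows evenly across the port's queues. Counting hash buckets per queue in miniature (the real table size is MVPP22_RSS_TABLE_ENTRIES; 32 here is illustrative):

#include <stdio.h>

#define RSS_TABLE_ENTRIES 32

int main(void)
{
    int nrxqs = 4;
    int hits[8] = { 0 };

    for (int i = 0; i < RSS_TABLE_ENTRIES; i++)
        hits[i % nrxqs]++;               /* table entry -> queue */

    for (int q = 0; q < nrxqs; q++)
        printf("rxq %d gets %d/%d buckets\n", q, hits[q],
               RSS_TABLE_ENTRIES);
    return 0;
}
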
netdev_hw_addr *ha; - int ret; - - netdev_hw_addr_list_for_each(ha, list) { - ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); - if (ret) - return ret; - } - - return 0; -} - -static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) -{ - if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) - mvpp2_prs_vid_enable_filtering(port); - else - mvpp2_prs_vid_disable_filtering(port); - - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_UNI_CAST, enable); - - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, enable); -} - -static void mvpp2_set_rx_mode(struct net_device *dev) -{ - struct mvpp2_port *port = netdev_priv(dev); - - /* Clear the whole UC and MC list */ - mvpp2_prs_mac_del_all(port); - - if (dev->flags & IFF_PROMISC) { - mvpp2_set_rx_promisc(port, true); - return; - } - - mvpp2_set_rx_promisc(port, false); - - if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || - mvpp2_prs_mac_da_accept_list(port, &dev->uc)) - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_UNI_CAST, true); - - if (dev->flags & IFF_ALLMULTI) { - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, true); - return; - } - - if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || - mvpp2_prs_mac_da_accept_list(port, &dev->mc)) - mvpp2_prs_mac_promisc_set(port->priv, port->id, - MVPP2_PRS_L2_MULTI_CAST, true); -} - -static int mvpp2_set_mac_address(struct net_device *dev, void *p) -{ - struct mvpp2_port *port = netdev_priv(dev); - const struct sockaddr *addr = p; - int err; - - if (!is_valid_ether_addr(addr->sa_data)) { - err = -EADDRNOTAVAIL; - goto log_error; - } - - if (!netif_running(dev)) { - err = mvpp2_prs_update_mac_da(dev, addr->sa_data); - if (!err) - return 0; - /* Reconfigure parser to accept the original MAC address */ - err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); - if (err) - goto log_error; - } - - mvpp2_stop_dev(port); - - err = mvpp2_prs_update_mac_da(dev, addr->sa_data); - if (!err) - goto out_start; - - /* Reconfigure parser accept the original MAC address */ - err = mvpp2_prs_update_mac_da(dev, dev->dev_addr); - if (err) - goto log_error; -out_start: - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - return 0; -log_error: - netdev_err(dev, "failed to change MAC address\n"); - return err; -} - -static int mvpp2_change_mtu(struct net_device *dev, int mtu) -{ - struct mvpp2_port *port = netdev_priv(dev); - int err; - - if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { - netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, - ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); - mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); - } - - if (!netif_running(dev)) { - err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - return 0; - } - - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - } - - mvpp2_stop_dev(port); - - err = mvpp2_bm_update_mtu(dev, mtu); - if (!err) { - port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); - goto out_start; - } - - /* Reconfigure BM to the original MTU */ - err = mvpp2_bm_update_mtu(dev, dev->mtu); - if (err) - goto log_error; - -out_start: - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - - return 0; -log_error: - netdev_err(dev, "failed to change MTU\n"); - return err; -} - -static void -mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - struct mvpp2_port *port = netdev_priv(dev); - unsigned 
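
mvpp2_set_mac_address and mvpp2_change_mtu below share one transactional shape: apply the new value, and on failure re-apply the old one so the parser or BM-pool state stays self-consistent, treating a failed rollback as the only hard error. The generic form, with apply() standing in for mvpp2_prs_update_mac_da() or mvpp2_bm_update_mtu():

#include <stdio.h>

static int apply(int value) { return value >= 0 ? 0 : -1; }  /* stub */

static int change_setting(int *cur, int want)
{
    if (apply(want) == 0) {
        *cur = want;
        return 0;
    }
    if (apply(*cur))          /* roll back; a second failure is fatal */
        fprintf(stderr, "rollback failed, state inconsistent\n");
    return -1;
}

int main(void)
{
    int mtu = 1500;

    change_setting(&mtu, -9000);   /* fails, rolls back */
    printf("mtu=%d\n", mtu);       /* still 1500 */
    return 0;
}
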
int start; - int cpu; - - for_each_possible_cpu(cpu) { - struct mvpp2_pcpu_stats *cpu_stats; - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - - cpu_stats = per_cpu_ptr(port->stats, cpu); - do { - start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); - rx_packets = cpu_stats->rx_packets; - rx_bytes = cpu_stats->rx_bytes; - tx_packets = cpu_stats->tx_packets; - tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); - - stats->rx_packets += rx_packets; - stats->rx_bytes += rx_bytes; - stats->tx_packets += tx_packets; - stats->tx_bytes += tx_bytes; - } - - stats->rx_errors = dev->stats.rx_errors; - stats->rx_dropped = dev->stats.rx_dropped; - stats->tx_dropped = dev->stats.tx_dropped; -} - -static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - int ret; - - if (!dev->phydev) - return -ENOTSUPP; - - ret = phy_mii_ioctl(dev->phydev, ifr, cmd); - if (!ret) - mvpp2_link_event(dev); - - return ret; -} - -static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct mvpp2_port *port = netdev_priv(dev); - int ret; - - ret = mvpp2_prs_vid_entry_add(port, vid); - if (ret) - netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", - MVPP2_PRS_VLAN_FILT_MAX - 1); - return ret; -} - -static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) -{ - struct mvpp2_port *port = netdev_priv(dev); - - mvpp2_prs_vid_entry_remove(port, vid); - return 0; -} - -static int mvpp2_set_features(struct net_device *dev, - netdev_features_t features) -{ - netdev_features_t changed = dev->features ^ features; - struct mvpp2_port *port = netdev_priv(dev); - - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { - mvpp2_prs_vid_enable_filtering(port); - } else { - /* Invalidate all registered VID filters for this - * port - */ - mvpp2_prs_vid_remove_all(port); - - mvpp2_prs_vid_disable_filtering(port); - } - } - - return 0; -} - -/* Ethtool methods */ - -/* Set interrupt coalescing for ethtools */ -static int mvpp2_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) -{ - struct mvpp2_port *port = netdev_priv(dev); - int queue; - - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq = port->rxqs[queue]; - - rxq->time_coal = c->rx_coalesce_usecs; - rxq->pkts_coal = c->rx_max_coalesced_frames; - mvpp2_rx_pkts_coal_set(port, rxq); - mvpp2_rx_time_coal_set(port, rxq); - } - - if (port->has_tx_irqs) { - port->tx_time_coal = c->tx_coalesce_usecs; - mvpp2_tx_time_coal_set(port); - } - - for (queue = 0; queue < port->ntxqs; queue++) { - struct mvpp2_tx_queue *txq = port->txqs[queue]; - - txq->done_pkts_coal = c->tx_max_coalesced_frames; - - if (port->has_tx_irqs) - mvpp2_tx_pkts_coal_set(port, txq); - } - - return 0; -} - -/* get coalescing for ethtools */ -static int mvpp2_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) -{ - struct mvpp2_port *port = netdev_priv(dev); - - c->rx_coalesce_usecs = port->rxqs[0]->time_coal; - c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; - c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; - c->tx_coalesce_usecs = port->tx_time_coal; - return 0; -} - -static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, - sizeof(drvinfo->version)); - 
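
mvpp2_get_stats64 above copies each CPU's counters under a u64_stats retry loop: snapshot the sequence, read all the counters, and retry if a writer bumped the sequence meanwhile, so the values always come from one consistent update. The reader side of that seqcount idea, sketched without the kernel's memory barriers and assuming a single writer:

#include <stdint.h>
#include <stdio.h>

struct pcpu_stats {
    volatile uint32_t seq;       /* odd while a writer is mid-update */
    uint64_t rx_packets, rx_bytes;
};

static void stats_read(const struct pcpu_stats *s,
                       uint64_t *pkts, uint64_t *bytes)
{
    uint32_t start;

    do {
        start = s->seq;          /* u64_stats_fetch_begin_irq() */
        *pkts = s->rx_packets;
        *bytes = s->rx_bytes;
        /* retry if a write started or finished in between */
    } while ((start & 1) || start != s->seq);
}

int main(void)
{
    struct pcpu_stats s = { 0, 42, 64000 };
    uint64_t p, b;

    stats_read(&s, &p, &b);
    printf("%llu packets, %llu bytes\n",
           (unsigned long long)p, (unsigned long long)b);
    return 0;
}
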
strlcpy(drvinfo->bus_info, dev_name(&dev->dev), - sizeof(drvinfo->bus_info)); -} - -static void mvpp2_ethtool_get_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mvpp2_port *port = netdev_priv(dev); - - ring->rx_max_pending = MVPP2_MAX_RXD_MAX; - ring->tx_max_pending = MVPP2_MAX_TXD_MAX; - ring->rx_pending = port->rx_ring_size; - ring->tx_pending = port->tx_ring_size; -} - -static int mvpp2_ethtool_set_ringparam(struct net_device *dev, - struct ethtool_ringparam *ring) -{ - struct mvpp2_port *port = netdev_priv(dev); - u16 prev_rx_ring_size = port->rx_ring_size; - u16 prev_tx_ring_size = port->tx_ring_size; - int err; - - err = mvpp2_check_ringparam_valid(dev, ring); - if (err) - return err; - - if (!netif_running(dev)) { - port->rx_ring_size = ring->rx_pending; - port->tx_ring_size = ring->tx_pending; - return 0; - } - - /* The interface is running, so we have to force a - * reallocation of the queues - */ - mvpp2_stop_dev(port); - mvpp2_cleanup_rxqs(port); - mvpp2_cleanup_txqs(port); - - port->rx_ring_size = ring->rx_pending; - port->tx_ring_size = ring->tx_pending; - - err = mvpp2_setup_rxqs(port); - if (err) { - /* Reallocate Rx queues with the original ring size */ - port->rx_ring_size = prev_rx_ring_size; - ring->rx_pending = prev_rx_ring_size; - err = mvpp2_setup_rxqs(port); - if (err) - goto err_out; - } - err = mvpp2_setup_txqs(port); - if (err) { - /* Reallocate Tx queues with the original ring size */ - port->tx_ring_size = prev_tx_ring_size; - ring->tx_pending = prev_tx_ring_size; - err = mvpp2_setup_txqs(port); - if (err) - goto err_clean_rxqs; - } - - mvpp2_start_dev(port); - mvpp2_egress_enable(port); - mvpp2_ingress_enable(port); - - return 0; - -err_clean_rxqs: - mvpp2_cleanup_rxqs(port); -err_out: - netdev_err(dev, "failed to change ring parameters"); - return err; -} - -/* Device ops */ - -static const struct net_device_ops mvpp2_netdev_ops = { - .ndo_open = mvpp2_open, - .ndo_stop = mvpp2_stop, - .ndo_start_xmit = mvpp2_tx, - .ndo_set_rx_mode = mvpp2_set_rx_mode, - .ndo_set_mac_address = mvpp2_set_mac_address, - .ndo_change_mtu = mvpp2_change_mtu, - .ndo_get_stats64 = mvpp2_get_stats64, - .ndo_do_ioctl = mvpp2_ioctl, - .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, - .ndo_set_features = mvpp2_set_features, -}; - -static const struct ethtool_ops mvpp2_eth_tool_ops = { - .nway_reset = phy_ethtool_nway_reset, - .get_link = ethtool_op_get_link, - .set_coalesce = mvpp2_ethtool_set_coalesce, - .get_coalesce = mvpp2_ethtool_get_coalesce, - .get_drvinfo = mvpp2_ethtool_get_drvinfo, - .get_ringparam = mvpp2_ethtool_get_ringparam, - .set_ringparam = mvpp2_ethtool_set_ringparam, - .get_strings = mvpp2_ethtool_get_strings, - .get_ethtool_stats = mvpp2_ethtool_get_stats, - .get_sset_count = mvpp2_ethtool_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, -}; - -/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that - * had a single IRQ defined per-port. 
- */ -static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - struct mvpp2_queue_vector *v = &port->qvecs[0]; - - v->first_rxq = 0; - v->nrxqs = port->nrxqs; - v->type = MVPP2_QUEUE_VECTOR_SHARED; - v->sw_thread_id = 0; - v->sw_thread_mask = *cpumask_bits(cpu_online_mask); - v->port = port; - v->irq = irq_of_parse_and_map(port_node, 0); - if (v->irq <= 0) - return -EINVAL; - netif_napi_add(port->dev, &v->napi, mvpp2_poll, - NAPI_POLL_WEIGHT); - - port->nqvecs = 1; - - return 0; -} - -static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - struct mvpp2_queue_vector *v; - int i, ret; - - port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; - - for (i = 0; i < port->nqvecs; i++) { - char irqname[16]; - - v = port->qvecs + i; - - v->port = port; - v->type = MVPP2_QUEUE_VECTOR_PRIVATE; - v->sw_thread_id = i; - v->sw_thread_mask = BIT(i); - - snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); - - if (queue_mode == MVPP2_QDIST_MULTI_MODE) { - v->first_rxq = i * MVPP2_DEFAULT_RXQ; - v->nrxqs = MVPP2_DEFAULT_RXQ; - } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && - i == (port->nqvecs - 1)) { - v->first_rxq = 0; - v->nrxqs = port->nrxqs; - v->type = MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); - } - - if (port_node) - v->irq = of_irq_get_byname(port_node, irqname); - else - v->irq = fwnode_irq_get(port->fwnode, i); - if (v->irq <= 0) { - ret = -EINVAL; - goto err; - } - - netif_napi_add(port->dev, &v->napi, mvpp2_poll, - NAPI_POLL_WEIGHT); - } - - return 0; - -err: - for (i = 0; i < port->nqvecs; i++) - irq_dispose_mapping(port->qvecs[i].irq); - return ret; -} - -static int mvpp2_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) -{ - if (port->has_tx_irqs) - return mvpp2_multi_queue_vectors_init(port, port_node); - else - return mvpp2_simple_queue_vectors_init(port, port_node); -} - -static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) -{ - int i; - - for (i = 0; i < port->nqvecs; i++) - irq_dispose_mapping(port->qvecs[i].irq); -} - -/* Configure Rx queue group interrupt for this port */ -static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) -{ - struct mvpp2 *priv = port->priv; - u32 val; - int i; - - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - port->nrxqs); - return; - } - - /* Handle the more complicated PPv2.2 case */ - for (i = 0; i < port->nqvecs; i++) { - struct mvpp2_queue_vector *qv = port->qvecs + i; - - if (!qv->nrxqs) - continue; - - val = qv->sw_thread_id; - val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = qv->first_rxq; - val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } -} - -/* Initialize port HW */ -static int mvpp2_port_init(struct mvpp2_port *port) -{ - struct device *dev = port->dev->dev.parent; - struct mvpp2 *priv = port->priv; - struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; - - /* Checks for hardware constraints */ - if (port->first_rxq + port->nrxqs > - MVPP2_MAX_PORTS * priv->max_port_rxqs) - return -EINVAL; - - if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || - (port->ntxqs > MVPP2_MAX_TXQ)) - return -EINVAL; - - /* Disable port */ - mvpp2_egress_disable(port); - mvpp2_port_disable(port); - - port->tx_time_coal = 
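
mvpp2_multi_queue_vectors_init below requests one private vector per possible CPU, named tx-cpu0 onwards, and in single-queue-distribution mode one extra shared vector renamed rx-shared that owns every Rx queue; the names must match the DT interrupt-names list checked by mvpp2_port_has_tx_irqs further down. Generating the same set:

#include <stdio.h>
#include <string.h>

int main(void)
{
    int ncpus = 4, single_mode = 1;
    int nvecs = ncpus + (single_mode ? 1 : 0);
    char name[16];

    for (int i = 0; i < nvecs; i++) {
        snprintf(name, sizeof(name), "tx-cpu%d", i);
        if (single_mode && i == nvecs - 1)
            strncpy(name, "rx-shared", sizeof(name));
        printf("qvec %d -> irq \"%s\"\n", i, name);
    }
    return 0;
}
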
MVPP2_TXDONE_COAL_USEC; - - port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), - GFP_KERNEL); - if (!port->txqs) - return -ENOMEM; - - /* Associate physical Tx queues to this port and initialize. - * The mapping is predefined. - */ - for (queue = 0; queue < port->ntxqs; queue++) { - int queue_phy_id = mvpp2_txq_phys(port->id, queue); - struct mvpp2_tx_queue *txq; - - txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); - if (!txq) { - err = -ENOMEM; - goto err_free_percpu; - } - - txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); - if (!txq->pcpu) { - err = -ENOMEM; - goto err_free_percpu; - } - - txq->id = queue_phy_id; - txq->log_id = queue; - txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; - } - - port->txqs[queue] = txq; - } - - port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), - GFP_KERNEL); - if (!port->rxqs) { - err = -ENOMEM; - goto err_free_percpu; - } - - /* Allocate and initialize Rx queue for this port */ - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq; - - /* Map physical Rx queue to port's logical Rx queue */ - rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); - if (!rxq) { - err = -ENOMEM; - goto err_free_percpu; - } - /* Map this Rx queue to a physical queue */ - rxq->id = port->first_rxq + queue; - rxq->port = port->id; - rxq->logic_rxq = queue; - - port->rxqs[queue] = rxq; - } - - mvpp2_rx_irqs_setup(port); - - /* Create Rx descriptor rings */ - for (queue = 0; queue < port->nrxqs; queue++) { - struct mvpp2_rx_queue *rxq = port->rxqs[queue]; - - rxq->size = port->rx_ring_size; - rxq->pkts_coal = MVPP2_RX_COAL_PKTS; - rxq->time_coal = MVPP2_RX_COAL_USEC; - } - - mvpp2_ingress_disable(port); - - /* Port default configuration */ - mvpp2_defaults_set(port); - - /* Port's classifier configuration */ - mvpp2_cls_oversize_rxq_set(port); - mvpp2_cls_port_config(port); - - /* Provide an initial Rx packet size */ - port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); - - /* Initialize pools for swf */ - err = mvpp2_swf_bm_pool_init(port); - if (err) - goto err_free_percpu; - - return 0; - -err_free_percpu: - for (queue = 0; queue < port->ntxqs; queue++) { - if (!port->txqs[queue]) - continue; - free_percpu(port->txqs[queue]->pcpu); - } - return err; -} - -/* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. 
- */ -static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) -{ - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; - - if (priv->hw_version == MVPP21) - return false; - - for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) - return false; - } - - return true; -} - -static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, - struct fwnode_handle *fwnode, - char **mac_from) -{ - struct mvpp2_port *port = netdev_priv(dev); - char hw_mac_addr[ETH_ALEN] = {0}; - char fw_mac_addr[ETH_ALEN]; - - if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { - *mac_from = "firmware node"; - ether_addr_copy(dev->dev_addr, fw_mac_addr); - return; - } - - if (priv->hw_version == MVPP21) { - mvpp21_get_mac_address(port, hw_mac_addr); - if (is_valid_ether_addr(hw_mac_addr)) { - *mac_from = "hardware"; - ether_addr_copy(dev->dev_addr, hw_mac_addr); - return; - } - } - - *mac_from = "random"; - eth_hw_addr_random(dev); -} - -/* Ports initialization */ -static int mvpp2_port_probe(struct platform_device *pdev, - struct fwnode_handle *port_fwnode, - struct mvpp2 *priv) -{ - struct device_node *phy_node; - struct phy *comphy = NULL; - struct mvpp2_port *port; - struct mvpp2_port_pcpu *port_pcpu; - struct device_node *port_node = to_of_node(port_fwnode); - struct net_device *dev; - struct resource *res; - char *mac_from = ""; - unsigned int ntxqs, nrxqs; - bool has_tx_irqs; - u32 id; - int features; - int phy_mode; - int err, i, cpu; - - if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; - } - - if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - - ntxqs = MVPP2_MAX_TXQ; - if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) - nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); - else - nrxqs = MVPP2_DEFAULT_RXQ; - - dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); - if (!dev) - return -ENOMEM; - - if (port_node) - phy_node = of_parse_phandle(port_node, "phy", 0); - else - phy_node = NULL; - - phy_mode = fwnode_get_phy_mode(port_fwnode); - if (phy_mode < 0) { - dev_err(&pdev->dev, "incorrect phy mode\n"); - err = phy_mode; - goto err_free_netdev; - } - - if (port_node) { - comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); - if (IS_ERR(comphy)) { - if (PTR_ERR(comphy) == -EPROBE_DEFER) { - err = -EPROBE_DEFER; - goto err_free_netdev; - } - comphy = NULL; - } - } - - if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { - err = -EINVAL; - dev_err(&pdev->dev, "missing port-id value\n"); - goto err_free_netdev; - } - - dev->tx_queue_len = MVPP2_MAX_TXD_MAX; - dev->watchdog_timeo = 5 * HZ; - dev->netdev_ops = &mvpp2_netdev_ops; - dev->ethtool_ops = &mvpp2_eth_tool_ops; - - port = netdev_priv(dev); - port->dev = dev; - port->fwnode = port_fwnode; - port->ntxqs = ntxqs; - port->nrxqs = nrxqs; - port->priv = priv; - port->has_tx_irqs = has_tx_irqs; - - err = mvpp2_queue_vectors_init(port, port_node); - if (err) - goto err_free_netdev; - - if (port_node) - port->link_irq = of_irq_get_byname(port_node, "link"); - else - port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); - if (port->link_irq == -EPROBE_DEFER) { - err = -EPROBE_DEFER; - goto err_deinit_qvecs; - } - if (port->link_irq <= 0) - /* the link irq is optional */ - port->link_irq = 0; - - if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) - 
port->flags |= MVPP2_F_LOOPBACK; - - port->id = id; - if (priv->hw_version == MVPP21) - port->first_rxq = port->id * port->nrxqs; - else - port->first_rxq = port->id * priv->max_port_rxqs; - - port->phy_node = phy_node; - port->phy_interface = phy_mode; - port->comphy = comphy; - - if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); - port->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(port->base)) { - err = PTR_ERR(port->base); - goto err_free_irq; - } - - port->stats_base = port->priv->lms_base + - MVPP21_MIB_COUNTERS_OFFSET + - port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; - } else { - if (fwnode_property_read_u32(port_fwnode, "gop-port-id", - &port->gop_id)) { - err = -EINVAL; - dev_err(&pdev->dev, "missing gop-port-id value\n"); - goto err_deinit_qvecs; - } - - port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); - port->stats_base = port->priv->iface_base + - MVPP22_MIB_COUNTERS_OFFSET + - port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; - } - - /* Alloc per-cpu and ethtool stats */ - port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); - if (!port->stats) { - err = -ENOMEM; - goto err_free_irq; - } - - port->ethtool_stats = devm_kcalloc(&pdev->dev, - ARRAY_SIZE(mvpp2_ethtool_regs), - sizeof(u64), GFP_KERNEL); - if (!port->ethtool_stats) { - err = -ENOMEM; - goto err_free_stats; - } - - mutex_init(&port->gather_stats_lock); - INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); - - mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); - - port->tx_ring_size = MVPP2_MAX_TXD_DFLT; - port->rx_ring_size = MVPP2_MAX_RXD_DFLT; - SET_NETDEV_DEV(dev, &pdev->dev); - - err = mvpp2_port_init(port); - if (err < 0) { - dev_err(&pdev->dev, "failed to init port %d\n", id); - goto err_free_stats; - } - - mvpp2_port_periodic_xon_disable(port); - - if (priv->hw_version == MVPP21) - mvpp2_port_fc_adv_enable(port); - - mvpp2_port_reset(port); - - port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); - if (!port->pcpu) { - err = -ENOMEM; - goto err_free_txq_pcpu; - } - - if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); - - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; - port_pcpu->timer_scheduled = false; - - tasklet_init(&port_pcpu->tx_done_tasklet, - mvpp2_tx_proc_cb, - (unsigned long)dev); - } - } - - features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO; - dev->features = features | NETIF_F_RXCSUM; - dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | - NETIF_F_HW_VLAN_CTAG_FILTER; - - if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - } - - dev->vlan_features |= features; - dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; - dev->priv_flags |= IFF_UNICAST_FLT; - - /* MTU range: 68 - 9704 */ - dev->min_mtu = ETH_MIN_MTU; - /* 9704 == 9728 - 20 and rounding to 8 */ - dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; - - err = register_netdev(dev); - if (err < 0) { - dev_err(&pdev->dev, "failed to register netdev\n"); - goto err_free_port_pcpu; - } - netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); - - priv->port_list[priv->port_count++] = port; - - return 0; - -err_free_port_pcpu: - free_percpu(port->pcpu); -err_free_txq_pcpu: - for (i = 0; i < port->ntxqs; i++) - free_percpu(port->txqs[i]->pcpu); 
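[Editor's note] The unwind labels around this point release the per-CPU state allocated earlier in mvpp2_port_init() and mvpp2_port_probe(). A minimal sketch of the alloc_percpu()/per_cpu_ptr() pattern those paths rely on (illustrative only; the struct and function names below are hypothetical, not from this patch):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct foo_pcpu {
            int cpu;
    };

    /* Allocate one private copy per CPU, then initialize the copies
     * belonging to the CPUs that are actually present.
     */
    static struct foo_pcpu __percpu *foo_pcpu_alloc(void)
    {
            struct foo_pcpu __percpu *p = alloc_percpu(struct foo_pcpu);
            int cpu;

            if (!p)
                    return NULL;

            for_each_present_cpu(cpu)
                    per_cpu_ptr(p, cpu)->cpu = cpu;

            return p;
    }

A single free_percpu() releases every CPU's copy at once, which is why each unwind label above needs only one call per object.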
-err_free_stats: - free_percpu(port->stats); -err_free_irq: - if (port->link_irq) - irq_dispose_mapping(port->link_irq); -err_deinit_qvecs: - mvpp2_queue_vectors_deinit(port); -err_free_netdev: - of_node_put(phy_node); - free_netdev(dev); - return err; -} - -/* Ports removal routine */ -static void mvpp2_port_remove(struct mvpp2_port *port) -{ - int i; - - unregister_netdev(port->dev); - of_node_put(port->phy_node); - free_percpu(port->pcpu); - free_percpu(port->stats); - for (i = 0; i < port->ntxqs; i++) - free_percpu(port->txqs[i]->pcpu); - mvpp2_queue_vectors_deinit(port); - if (port->link_irq) - irq_dispose_mapping(port->link_irq); - free_netdev(port->dev); -} - -/* Initialize decoding windows */ -static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, - struct mvpp2 *priv) -{ - u32 win_enable; - int i; - - for (i = 0; i < 6; i++) { - mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); - mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); - - if (i < 4) - mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); - } - - win_enable = 0; - - for (i = 0; i < dram->num_cs; i++) { - const struct mbus_dram_window *cs = dram->cs + i; - - mvpp2_write(priv, MVPP2_WIN_BASE(i), - (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | - dram->mbus_dram_target_id); - - mvpp2_write(priv, MVPP2_WIN_SIZE(i), - (cs->size - 1) & 0xffff0000); - - win_enable |= (1 << i); - } - - mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); -} - -/* Initialize Rx FIFO's */ -static void mvpp2_rx_fifo_init(struct mvpp2 *priv) -{ - int port; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); - } - - mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, - MVPP2_RX_FIFO_PORT_MIN_PKT); - mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); -} - -static void mvpp22_rx_fifo_init(struct mvpp2 *priv) -{ - int port; - - /* The FIFO size parameters are set depending on the maximum speed a - * given port can handle: - * - Port 0: 10Gbps - * - Port 1: 2.5Gbps - * - Ports 2 and 3: 1Gbps - */ - - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); - - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); - - for (port = 2; port < MVPP2_MAX_PORTS; port++) { - mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); - mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); - } - - mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, - MVPP2_RX_FIFO_PORT_MIN_PKT); - mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); -} - -/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G - * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G, - * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB. 
- */ -static void mvpp22_tx_fifo_init(struct mvpp2 *priv) -{ - int port, size, thrs; - - for (port = 0; port < MVPP2_MAX_PORTS; port++) { - if (port == 0) { - size = MVPP22_TX_FIFO_DATA_SIZE_10KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_10KB; - } else { - size = MVPP22_TX_FIFO_DATA_SIZE_3KB; - thrs = MVPP2_TX_FIFO_THRESHOLD_3KB; - } - mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size); - mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs); - } -} - -static void mvpp2_axi_init(struct mvpp2 *priv) -{ - u32 val, rdval, wrval; - - mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); - - /* AXI Bridge Configuration */ - - rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE - << MVPP22_AXI_ATTR_CACHE_OFFS; - rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_ATTR_DOMAIN_OFFS; - - wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE - << MVPP22_AXI_ATTR_CACHE_OFFS; - wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_ATTR_DOMAIN_OFFS; - - /* BM */ - mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); - mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); - - /* Descriptors */ - mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); - mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); - - /* Buffer Data */ - mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); - mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); - - val = MVPP22_AXI_CODE_CACHE_NON_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); - mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); - - val = MVPP22_AXI_CODE_CACHE_RD_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - - mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); - - val = MVPP22_AXI_CODE_CACHE_WR_CACHE - << MVPP22_AXI_CODE_CACHE_OFFS; - val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM - << MVPP22_AXI_CODE_DOMAIN_OFFS; - - mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); -} - -/* Initialize network controller common part HW */ -static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) -{ - const struct mbus_dram_target_info *dram_target_info; - int err, i; - u32 val; - - /* MBUS windows configuration */ - dram_target_info = mv_mbus_dram_info(); - if (dram_target_info) - mvpp2_conf_mbus_windows(dram_target_info, priv); - - if (priv->hw_version == MVPP22) - mvpp2_axi_init(priv); - - /* Disable HW PHY polling */ - if (priv->hw_version == MVPP21) { - val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - val |= MVPP2_PHY_AN_STOP_SMI0_MASK; - writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); - } else { - val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); - val &= ~MVPP22_SMI_POLLING_EN; - writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); - } - - /* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), - sizeof(*priv->aggr_txqs), - GFP_KERNEL); - if (!priv->aggr_txqs) - return -ENOMEM; - - for_each_present_cpu(i) { - priv->aggr_txqs[i].id = i; - priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; - err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); - if (err < 0) - return err; - } - - /* Fifo Init */ - if (priv->hw_version == MVPP21) { - mvpp2_rx_fifo_init(priv); - } else { - mvpp22_rx_fifo_init(priv); - mvpp22_tx_fifo_init(priv); - } - - if (priv->hw_version == 
MVPP21) - writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, - priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); - - /* Allow cache snoop when transmiting packets */ - mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); - - /* Buffer Manager initialization */ - err = mvpp2_bm_init(pdev, priv); - if (err < 0) - return err; - - /* Parser default initialization */ - err = mvpp2_prs_default_init(pdev, priv); - if (err < 0) - return err; - - /* Classifier default initialization */ - mvpp2_cls_init(priv); - - return 0; -} - -static int mvpp2_probe(struct platform_device *pdev) -{ - const struct acpi_device_id *acpi_id; - struct fwnode_handle *fwnode = pdev->dev.fwnode; - struct fwnode_handle *port_fwnode; - struct mvpp2 *priv; - struct resource *res; - void __iomem *base; - int i; - int err; - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - if (has_acpi_companion(&pdev->dev)) { - acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, - &pdev->dev); - priv->hw_version = (unsigned long)acpi_id->driver_data; - } else { - priv->hw_version = - (unsigned long)of_device_get_match_data(&pdev->dev); - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - if (priv->hw_version == MVPP21) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - priv->lms_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->lms_base)) - return PTR_ERR(priv->lms_base); - } else { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (has_acpi_companion(&pdev->dev)) { - /* In case the MDIO memory region is declared in - * the ACPI, it can already appear as 'in-use' - * in the OS. Because it is overlapped by second - * region of the network controller, make - * sure it is released, before requesting it again. - * The care is taken by mvpp2 driver to avoid - * concurrent access to this memory region. - */ - release_resource(res); - } - priv->iface_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->iface_base)) - return PTR_ERR(priv->iface_base); - } - - if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) { - priv->sysctrl_base = - syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "marvell,system-controller"); - if (IS_ERR(priv->sysctrl_base)) - /* The system controller regmap is optional for dt - * compatibility reasons. When not provided, the - * configuration of the GoP relies on the - * firmware/bootloader. - */ - priv->sysctrl_base = NULL; - } - - mvpp2_setup_bm_pool(); - - for (i = 0; i < MVPP2_MAX_THREADS; i++) { - u32 addr_space_sz; - - addr_space_sz = (priv->hw_version == MVPP21 ? 
- MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); - priv->swth_base[i] = base + i * addr_space_sz; - } - - if (priv->hw_version == MVPP21) - priv->max_port_rxqs = 8; - else - priv->max_port_rxqs = 32; - - if (dev_of_node(&pdev->dev)) { - priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); - if (IS_ERR(priv->pp_clk)) - return PTR_ERR(priv->pp_clk); - err = clk_prepare_enable(priv->pp_clk); - if (err < 0) - return err; - - priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); - if (IS_ERR(priv->gop_clk)) { - err = PTR_ERR(priv->gop_clk); - goto err_pp_clk; - } - err = clk_prepare_enable(priv->gop_clk); - if (err < 0) - goto err_pp_clk; - - if (priv->hw_version == MVPP22) { - priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); - if (IS_ERR(priv->mg_clk)) { - err = PTR_ERR(priv->mg_clk); - goto err_gop_clk; - } - - err = clk_prepare_enable(priv->mg_clk); - if (err < 0) - goto err_gop_clk; - - priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); - if (IS_ERR(priv->mg_core_clk)) { - priv->mg_core_clk = NULL; - } else { - err = clk_prepare_enable(priv->mg_core_clk); - if (err < 0) - goto err_mg_clk; - } - } - - priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); - if (IS_ERR(priv->axi_clk)) { - err = PTR_ERR(priv->axi_clk); - if (err == -EPROBE_DEFER) - goto err_mg_core_clk; - priv->axi_clk = NULL; - } else { - err = clk_prepare_enable(priv->axi_clk); - if (err < 0) - goto err_mg_core_clk; - } - - /* Get system's tclk rate */ - priv->tclk = clk_get_rate(priv->pp_clk); - } else if (device_property_read_u32(&pdev->dev, "clock-frequency", - &priv->tclk)) { - dev_err(&pdev->dev, "missing clock-frequency value\n"); - return -EINVAL; - } - - if (priv->hw_version == MVPP22) { - err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); - if (err) - goto err_axi_clk; - /* Sadly, the BM pools all share the same register to - * store the high 32 bits of their address. So they - * must all have the same high 32 bits, which forces - * us to restrict coherent memory to DMA_BIT_MASK(32). - */ - err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (err) - goto err_axi_clk; - } - - /* Initialize network controller */ - err = mvpp2_init(pdev, priv); - if (err < 0) { - dev_err(&pdev->dev, "failed to initialize controller\n"); - goto err_axi_clk; - } - - /* Initialize ports */ - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - err = mvpp2_port_probe(pdev, port_fwnode, priv); - if (err < 0) - goto err_port_probe; - } - - if (priv->port_count == 0) { - dev_err(&pdev->dev, "no ports enabled\n"); - err = -ENODEV; - goto err_axi_clk; - } - - /* Statistics must be gathered regularly because some of them (like - * packets counters) are 32-bit registers and could overflow quite - * quickly. For instance, a 10Gb link used at full bandwidth with the - * smallest packets (64B) will overflow a 32-bit counter in less than - * 30 seconds. Then, use a workqueue to fill 64-bit counters. - */ - snprintf(priv->queue_name, sizeof(priv->queue_name), - "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), - priv->port_count > 1 ? 
"+" : ""); - priv->stats_queue = create_singlethread_workqueue(priv->queue_name); - if (!priv->stats_queue) { - err = -ENOMEM; - goto err_port_probe; - } - - platform_set_drvdata(pdev, priv); - return 0; - -err_port_probe: - i = 0; - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) - mvpp2_port_remove(priv->port_list[i]); - i++; - } -err_axi_clk: - clk_disable_unprepare(priv->axi_clk); - -err_mg_core_clk: - if (priv->hw_version == MVPP22) - clk_disable_unprepare(priv->mg_core_clk); -err_mg_clk: - if (priv->hw_version == MVPP22) - clk_disable_unprepare(priv->mg_clk); -err_gop_clk: - clk_disable_unprepare(priv->gop_clk); -err_pp_clk: - clk_disable_unprepare(priv->pp_clk); - return err; -} - -static int mvpp2_remove(struct platform_device *pdev) -{ - struct mvpp2 *priv = platform_get_drvdata(pdev); - struct fwnode_handle *fwnode = pdev->dev.fwnode; - struct fwnode_handle *port_fwnode; - int i = 0; - - flush_workqueue(priv->stats_queue); - destroy_workqueue(priv->stats_queue); - - fwnode_for_each_available_child_node(fwnode, port_fwnode) { - if (priv->port_list[i]) { - mutex_destroy(&priv->port_list[i]->gather_stats_lock); - mvpp2_port_remove(priv->port_list[i]); - } - i++; - } - - for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { - struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; - - mvpp2_bm_pool_destroy(pdev, priv, bm_pool); - } - - for_each_present_cpu(i) { - struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; - - dma_free_coherent(&pdev->dev, - MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, - aggr_txq->descs, - aggr_txq->descs_dma); - } - - if (is_acpi_node(port_fwnode)) - return 0; - - clk_disable_unprepare(priv->axi_clk); - clk_disable_unprepare(priv->mg_core_clk); - clk_disable_unprepare(priv->mg_clk); - clk_disable_unprepare(priv->pp_clk); - clk_disable_unprepare(priv->gop_clk); - - return 0; -} - -static const struct of_device_id mvpp2_match[] = { - { - .compatible = "marvell,armada-375-pp2", - .data = (void *)MVPP21, - }, - { - .compatible = "marvell,armada-7k-pp22", - .data = (void *)MVPP22, - }, - { } -}; -MODULE_DEVICE_TABLE(of, mvpp2_match); - -static const struct acpi_device_id mvpp2_acpi_match[] = { - { "MRVL0110", MVPP22 }, - { }, -}; -MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); - -static struct platform_driver mvpp2_driver = { - .probe = mvpp2_probe, - .remove = mvpp2_remove, - .driver = { - .name = MVPP2_DRIVER_NAME, - .of_match_table = mvpp2_match, - .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), - }, -}; - -module_platform_driver(mvpp2_driver); - -MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); -MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile new file mode 100644 index 000000000000..4d11dd9e3246 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Marvell PPv2 driver. +# +obj-$(CONFIG_MVPP2) := mvpp2.o + +mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h new file mode 100644 index 000000000000..def00dc3eb4e --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -0,0 +1,1046 @@ +/* + * Definitions for Marvell PPv2 network controller for Armada 375 SoC. 
+ * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#ifndef _MVPP2_H_ +#define _MVPP2_H_ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/phylink.h> + +/* Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port)) +#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Top Registers */ +#define MVPP2_MH_REG(port) (0x5040 + 4 * (port)) +#define MVPP2_DSA_EXTENDED BIT(5) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) + +/* RSS Registers */ +#define MVPP22_RSS_INDEX 0x1500 +#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx) +#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 +#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_WIDTH 0x150c + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define 
MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) +#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Descriptor Manager Top Registers */ +#define MVPP2_RXQ_NUM_REG 0x2040 +#define MVPP2_RXQ_DESC_ADDR_REG 0x2044 +#define MVPP22_DESC_ADDR_OFFS 8 +#define MVPP2_RXQ_DESC_SIZE_REG 0x2048 +#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) +#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 +#define MVPP2_RXQ_NUM_NEW_OFFSET 16 +#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) +#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff +#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 +#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 +#define MVPP2_RXQ_THRESH_REG 0x204c +#define MVPP2_OCCUPIED_THRESH_OFFSET 0 +#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff +#define MVPP2_RXQ_INDEX_REG 0x2050 +#define MVPP2_TXQ_NUM_REG 0x2080 +#define MVPP2_TXQ_DESC_ADDR_REG 0x2084 +#define MVPP2_TXQ_DESC_SIZE_REG 0x2088 +#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff +#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 +#define MVPP2_TXQ_INDEX_REG 0x2098 +#define MVPP2_TXQ_PREF_BUF_REG 0x209c +#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) +#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) +#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) +#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) +#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) +#define MVPP2_TXQ_PENDING_REG 0x20a0 +#define MVPP2_TXQ_PENDING_MASK 0x3fff +#define MVPP2_TXQ_INT_STATUS_REG 0x20a4 +#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) +#define MVPP2_TRANSMITTED_COUNT_OFFSET 16 +#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 +#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 +#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 +#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 +#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff +#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 +#define MVPP2_TXQ_RSVD_CLR_OFFSET 16 +#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) +#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 +#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) +#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff +#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) + +/* MBUS bridge registers */ +#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) +#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) +#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) +#define MVPP2_BASE_ADDR_ENABLE 0x4060 + +/* AXI Bridge Registers */ +#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 +#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 +#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 +#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 +#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 +#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c +#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 +#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 +#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 +#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 +#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 +#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 + +/* Values for AXI Bridge registers */ +#define MVPP22_AXI_ATTR_CACHE_OFFS 0 +#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 + +#define MVPP22_AXI_CODE_CACHE_OFFS 0 +#define 
MVPP22_AXI_CODE_DOMAIN_OFFS 4 + +#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 +#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 +#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb + +#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 +#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 + +/* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + +#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) +#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) + +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 + +#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 + +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 + +#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) +#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) +#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) +#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) +#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 +#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) +#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) +#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) +#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) +#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) +#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) +#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc +#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff +#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 +#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8 +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define 
MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444 +#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 +#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) +#define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED 
BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FC_ADV_ASM_EN BIT(10) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_STATUS0 0x10 +#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0) +#define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) +#define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) +#define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +#define MVPP22_GMAC_INT_STAT 0x20 +#define MVPP22_GMAC_INT_STAT_LINK BIT(1) +#define MVPP22_GMAC_INT_MASK 0x24 +#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1) +#define MVPP22_GMAC_CTRL_4_REG 0x90 +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_RX_FC_EN BIT(3) +#define MVPP22_CTRL4_TX_FC_EN BIT(4) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_GMAC_INT_SUM_MASK 0xa4 +#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1) + +/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, + * relative to port->base. + */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) +#define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN BIT(8) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff +#define MVPP22_XLG_STATUS 0x10c +#define MVPP22_XLG_STATUS_LINK_UP BIT(0) +#define MVPP22_XLG_INT_STAT 0x114 +#define MVPP22_XLG_INT_STAT_LINK BIT(1) +#define MVPP22_XLG_INT_MASK 0x118 +#define MVPP22_XLG_INT_MASK_LINK BIT(1) +#define MVPP22_XLG_CTRL3_REG 0x11c +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_EXT_INT_MASK 0x15c +#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1) +#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) +#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14) + +/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ +#define MVPP22_SMI_MISC_CFG_REG 0x1204 +#define MVPP22_SMI_POLLING_EN BIT(10) + +#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) + +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Descriptor ring Macros */ +#define MVPP2_QUEUE_NEXT_DESC(q, index) \ + (((index) < (q)->last_desc) ? ((index) + 1) : 0) + +/* XPCS registers. PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. 
PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* System controller registers. Accessed through a regmap. */ +#define GENCONF_SOFT_RESET1 0x1108 +#define GENCONF_SOFT_RESET1_GOP BIT(6) +#define GENCONF_PORT_CTRL0 0x1110 +#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) +#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) +#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) +#define GENCONF_PORT_CTRL1 0x1114 +#define GENCONF_PORT_CTRL1_EN(p) BIT(p) +#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) +#define GENCONF_CTRL0 0x1120 +#define GENCONF_CTRL0_PORT0_RGMII BIT(0) +#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) +#define GENCONF_CTRL0_PORT1_RGMII BIT(2) + +/* Various constants */ + +/* Coalescing */ +#define MVPP2_TXDONE_COAL_PKTS_THRESH 64 +#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 +#define MVPP2_RX_COAL_PKTS 32 +#define MVPP2_RX_COAL_USEC 64 + +/* The two-byte Marvell header. Either contains a special value used + * by Marvell switches when a specific hardware mode is enabled (not + * supported by this driver) or is filled automatically by zeroes on + * the RX side. Those two bytes being at the front of the Ethernet + * header, they allow the IP header to be aligned on a 4-byte + * boundary automatically: the hardware skips those two bytes on its + * own. + */ +#define MVPP2_MH_SIZE 2 +#define MVPP2_ETH_TYPE_LEN 2 +#define MVPP2_PPPOE_HDR_SIZE 8 +#define MVPP2_VLAN_TAG_LEN 4 +#define MVPP2_VLAN_TAG_EDSA_LEN 8 + +/* Lbtd 802.3 type */ +#define MVPP2_IP_LBDT_TYPE 0xfffa + +#define MVPP2_TX_CSUM_MAX_SIZE 9800 + +/* Timeout constants */ +#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 +#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 + +#define MVPP2_TX_MTU_MAX 0x7ffff + +/* Maximum number of T-CONTs of PON port */ +#define MVPP2_MAX_TCONT 16 + +/* Maximum number of supported ports */ +#define MVPP2_MAX_PORTS 4 + +/* Maximum number of TXQs used by a single port */ +#define MVPP2_MAX_TXQ 8 + +/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO + * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data), + * multiply this value by two to count the maximum number of skb descs needed.
+ */ +#define MVPP2_MAX_TSO_SEGS 300 +#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + +/* Default number of RXQs in use */ +#define MVPP2_DEFAULT_RXQ 4 + +/* Max number of Rx descriptors */ +#define MVPP2_MAX_RXD_MAX 1024 +#define MVPP2_MAX_RXD_DFLT 128 + +/* Max number of Tx descriptors */ +#define MVPP2_MAX_TXD_MAX 2048 +#define MVPP2_MAX_TXD_DFLT 1024 + +/* Number of Tx descriptors that can be reserved at once by a CPU */ +#define MVPP2_CPU_DESC_CHUNK 64 + +/* Max number of Tx descriptors in each aggregated queue */ +#define MVPP2_AGGR_TXQ_SIZE 256 + +/* Descriptor aligned size */ +#define MVPP2_DESC_ALIGNED_SIZE 32 + +/* Descriptor alignment mask */ +#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) + +/* RX FIFO constants */ +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 + +/* TX FIFO constants */ +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 +#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 +#define MVPP2_TX_FIFO_THRESHOLD_10KB \ + (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) +#define MVPP2_TX_FIFO_THRESHOLD_3KB \ + (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN) + +/* RX buffer constants */ +#define MVPP2_SKB_SHINFO_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +#define MVPP2_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, cache_line_size()) + +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) +#define MVPP2_RX_MAX_PKT_SIZE(total_size) \ + ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) + +/* IPv6 max L3 address size */ +#define MVPP2_MAX_L3_ADDR_SIZE 16 + +/* Port flags */ +#define MVPP2_F_LOOPBACK BIT(0) + +/* Marvell tag types */ +enum mvpp2_tag_type { + MVPP2_TAG_TYPE_NONE = 0, + MVPP2_TAG_TYPE_MH = 1, + MVPP2_TAG_TYPE_DSA = 2, + MVPP2_TAG_TYPE_EDSA = 3, + MVPP2_TAG_TYPE_VLAN = 4, + MVPP2_TAG_TYPE_LAST = 5 +}; + +/* L2 cast enum */ +enum mvpp2_prs_l2_cast { + MVPP2_PRS_L2_UNI_CAST, + MVPP2_PRS_L2_MULTI_CAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; + +/* BM constants */ +#define MVPP2_BM_JUMBO_BUF_NUM 512 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +#define MVPP2_BM_SHORT_FRAME_SIZE 512 +#define MVPP2_BM_LONG_FRAME_SIZE 2048 +#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 +/* BM short pool packet size + * These values ensure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE) +#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE) +#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE) + +#define MVPP21_ADDR_SPACE_SZ 0 +#define MVPP22_ADDR_SPACE_SZ SZ_64K + +#define MVPP2_MAX_THREADS 8 +#define 
MVPP2_MAX_QVECS MVPP2_MAX_THREADS + +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc +#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) + +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) + +/* Definitions */ + +/* Shared Packet Processor resources */ +struct mvpp2 { + /* Shared registers' base addresses */ + void __iomem *lms_base; + void __iomem *iface_base; + + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. + */ + void __iomem *swth_base[MVPP2_MAX_THREADS]; + + /* On PPv2.2, some port control registers are located in the system + * controller space. These registers are accessible through a regmap.
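[Editor's note] The MIB offsets above are applied on top of a per-port counter window; the port probe code earlier computes, for PPv2.1, stats_base = lms_base + MVPP21_MIB_COUNTERS_OFFSET + gop_id * MVPP21_MIB_COUNTERS_PORT_SZ. A minimal sketch of reading one counter (illustrative only; the helper name is hypothetical, and the assumption that 64-bit counters such as MVPP2_MIB_GOOD_OCTETS_RCVD expose their high word at offset + 4 is the editor's, not stated in this hunk):

    #include <linux/io.h>

    static u64 mib_counter_read(void __iomem *stats_base, u32 offset,
                                bool is_64b)
    {
            u64 val = readl(stats_base + offset);

            /* 64-bit counters: accumulate the high word as well */
            if (is_64b)
                    val += (u64)readl(stats_base + offset + 4) << 32;

            return val;
    }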
+ */ + struct regmap *sysctrl_base; + + /* Common clocks */ + struct clk *pp_clk; + struct clk *gop_clk; + struct clk *mg_clk; + struct clk *mg_core_clk; + struct clk *axi_clk; + + /* List of pointers to port structures */ + int port_count; + struct mvpp2_port *port_list[MVPP2_MAX_PORTS]; + + /* Aggregated TXQs */ + struct mvpp2_tx_queue *aggr_txqs; + + /* BM pools */ + struct mvpp2_bm_pool *bm_pools; + + /* PRS shadow table */ + struct mvpp2_prs_shadow *prs_shadow; + /* PRS auxiliary table for double vlan entries control */ + bool *prs_double_vlans; + + /* Tclk value */ + u32 tclk; + + /* HW version */ + enum { MVPP21, MVPP22 } hw_version; + + /* Maximum number of RXQs per port */ + unsigned int max_port_rxqs; + + /* Workqueue to gather hardware statistics */ + char queue_name[30]; + struct workqueue_struct *stats_queue; +}; + +struct mvpp2_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + +/* Per-CPU port control */ +struct mvpp2_port_pcpu { + struct hrtimer tx_done_timer; + bool timer_scheduled; + /* Tasklet for egress finalization */ + struct tasklet_struct tx_done_tasklet; +}; + +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + +struct mvpp2_port { + u8 id; + + /* Index of the port from the "group of ports" complex point + * of view + */ + int gop_id; + + int link_irq; + + struct mvpp2 *priv; + + /* Firmware node associated to the port */ + struct fwnode_handle *fwnode; + + /* Is a PHY always connected to the port */ + bool has_phy; + + /* Per-port registers' base address */ + void __iomem *base; + void __iomem *stats_base; + + struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; + struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; + struct net_device *dev; + + int pkt_size; + + /* Per-CPU port control */ + struct mvpp2_port_pcpu __percpu *pcpu; + + /* Flags */ + unsigned long flags; + + u16 tx_ring_size; + u16 rx_ring_size; + struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; + + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + + struct device_node *of_node; + + phy_interface_t phy_interface; + struct phylink *phylink; + struct phy *comphy; + + struct mvpp2_bm_pool *pool_long; + struct mvpp2_bm_pool *pool_short; + + /* Index of first port's physical RXQ */ + u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; +}; + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define 
MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +/* HW TX descriptor for PPv2.1 */ +struct mvpp21_tx_desc { + u32 command; /* Options used by HW for packet transmitting.*/ + u8 packet_offset; /* the offset from the buffer beginning */ + u8 phys_txq; /* destination queue ID */ + u16 data_size; /* data size of transmitted packet in bytes */ + u32 buf_dma_addr; /* physical addr of transmitted buffer */ + u32 buf_cookie; /* cookie for access to TX buffer in tx path */ + u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + u32 reserved2; /* reserved (for future use) */ +}; + +/* HW RX descriptor for PPv2.1 */ +struct mvpp21_rx_desc { + u32 status; /* info about received packet */ + u16 reserved1; /* parser_info (for future use, PnC) */ + u16 data_size; /* size of received packet in bytes */ + u32 buf_dma_addr; /* physical address of the buffer */ + u32 buf_cookie; /* cookie for access to RX buffer in rx path */ + u16 reserved2; /* gem_port_id (for future use, PON) */ + u16 reserved3; /* csum_l4 (for future use, PnC) */ + u8 reserved4; /* bm_qset (for future use, BM) */ + u8 reserved5; + u16 reserved6; /* classify_info (for future use, PnC) */ + u32 reserved7; /* flow_id (for future use, PnC) */ + u32 reserved8; +}; + +/* HW TX descriptor for PPv2.2 */ +struct mvpp22_tx_desc { + u32 command; + u8 packet_offset; + u8 phys_txq; + u16 data_size; + u64 reserved1; + u64 buf_dma_addr_ptp; + u64 buf_cookie_misc; +}; + +/* HW RX descriptor for PPv2.2 */ +struct mvpp22_rx_desc { + u32 status; + u16 reserved1; + u16 data_size; + u32 reserved2; + u32 reserved3; + u64 buf_dma_addr_key_hash; + u64 buf_cookie_misc; +}; + +/* Opaque type used by the driver to manipulate the HW TX and RX + * descriptors + */ +struct mvpp2_tx_desc { + union { + struct mvpp21_tx_desc pp21; + struct mvpp22_tx_desc pp22; + }; +}; + +struct mvpp2_rx_desc { + union { + struct mvpp21_rx_desc pp21; + struct mvpp22_rx_desc pp22; + }; +}; + +struct mvpp2_txq_pcpu_buf { + /* Transmitted SKB */ + struct sk_buff *skb; + + /* Physical address of transmitted buffer */ + dma_addr_t dma; + + /* Size transmitted */ + size_t size; +}; + +/* Per-CPU Tx queue control */ +struct mvpp2_txq_pcpu { + int cpu; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the + * descriptor ring + */ + int count; + + int wake_threshold; + int stop_threshold; + + /* Number of Tx DMA descriptors reserved for each CPU */ + int reserved_num; + + /* Infos about transmitted buffers */ + struct mvpp2_txq_pcpu_buf *buffs; + + /* Index of last TX DMA descriptor that was inserted */ + int txq_put_index; + + /* Index of the TX DMA descriptor to be cleaned up */ + int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; +}; + +struct mvpp2_tx_queue { + /* Physical number of this Tx queue */ + u8 id; + + /* Logical number of this Tx queue */ + u8 log_id; + + /* Number of Tx DMA descriptors in the descriptor ring */ + int size; + + /* Number of currently used Tx DMA descriptor in the descriptor ring */ + int count; + + /* Per-CPU control of physical Tx queues */ + struct mvpp2_txq_pcpu __percpu *pcpu; + + u32 done_pkts_coal; + + /* Virtual address of 
the Tx DMA descriptors array */ + struct mvpp2_tx_desc *descs; + + /* DMA address of the Tx DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last Tx DMA descriptor */ + int last_desc; + + /* Index of the next Tx DMA descriptor to process */ + int next_desc_to_proc; +}; + +struct mvpp2_rx_queue { + /* RX queue number, in the range 0-31 for physical RXQs */ + u8 id; + + /* Number of Rx descriptors in the Rx descriptor ring */ + int size; + + u32 pkts_coal; + u32 time_coal; + + /* Virtual address of the RX DMA descriptors array */ + struct mvpp2_rx_desc *descs; + + /* DMA address of the RX DMA descriptors array */ + dma_addr_t descs_dma; + + /* Index of the last RX DMA descriptor */ + int last_desc; + + /* Index of the next RX DMA descriptor to process */ + int next_desc_to_proc; + + /* ID of port to which physical RXQ is mapped */ + int port; + + /* Port's logic RXQ number to which physical RXQ is mapped */ + int logic_rxq; +}; + +struct mvpp2_bm_pool { + /* Pool number in the range 0-7 */ + int id; + + /* Buffer Pointers Pool External (BPPE) size */ + int size; + /* BPPE size in bytes */ + int size_bytes; + /* Number of buffers for this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + int frag_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE DMA base address */ + dma_addr_t dma_addr; + + /* Ports using BM pool */ + u32 port_map; +}; + +#define IS_TSO_HEADER(txq_pcpu, addr) \ + ((addr) >= (txq_pcpu)->tso_headers_dma && \ + (addr) < (txq_pcpu)->tso_headers_dma + \ + (txq_pcpu)->size * TSO_HEADER_SIZE) + +#define MVPP2_DRIVER_NAME "mvpp2" +#define MVPP2_DRIVER_VERSION "1.0" + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data); +u32 mvpp2_read(struct mvpp2 *priv, u32 offset); + +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset); + +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data); +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset); + +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, u32 offset, + u32 data); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c new file mode 100644 index 000000000000..8581d5b17dd5 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -0,0 +1,141 @@ +/* + * RSS and Classifier helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
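[Editor's note] The last_desc/next_desc_to_proc fields above pair with the MVPP2_QUEUE_NEXT_DESC() macro defined earlier in this header. A sketch of the usual ring walk (illustrative only; the helper name is hypothetical):

    /* Return the descriptor at the current ring position and advance
     * the index, wrapping from last_desc back to 0.
     */
    static struct mvpp2_tx_desc *txq_next_desc(struct mvpp2_tx_queue *txq)
    {
            int idx = txq->next_desc_to_proc;

            txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, idx);
            return txq->descs + idx;
    }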
+ */ + +#include "mvpp2.h" +#include "mvpp2_cls.h" + +/* Update classification flow table registers */ +static void mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +/* Update classification lookup table register */ +static void mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + u32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Classifier default initialization */ +void mvpp2_cls_init(struct mvpp2 *priv) +{ + struct mvpp2_cls_lookup_entry le; + struct mvpp2_cls_flow_entry fe; + int index; + + /* Enable classifier */ + mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); + + /* Clear classifier flow table */ + memset(&fe.data, 0, sizeof(fe.data)); + for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { + fe.index = index; + mvpp2_cls_flow_write(priv, &fe); + } + + /* Clear classifier lookup table */ + le.data = 0; + for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { + le.lkpid = index; + le.way = 0; + mvpp2_cls_lookup_write(priv, &le); + + le.way = 1; + mvpp2_cls_lookup_write(priv, &le); + } +} + +void mvpp2_cls_port_config(struct mvpp2_port *port) +{ + struct mvpp2_cls_lookup_entry le; + u32 val; + + /* Set way for the port */ + val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); + val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); + + /* Pick the entry to be accessed in lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); +} + +/* Set CPU queue number for oversize packets */ +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + u32 val; + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +} + +void mvpp22_init_rss(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int i; + + /* Set the table width: replace the whole classifier Rx queue number + * with the ones configured in RSS table entries. + */ + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); + + /* Loop through the classifier Rx Queues and map them to a RSS table. + * Map them all to the first table (0) by default. + */ + for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); + mvpp2_write(priv, MVPP22_RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(0)); + } + + /* Configure the first table to evenly distribute the packets across + * real Rx Queues. 
The table entries map a hash to a port Rx Queue. + */ + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(0) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); + } + +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h new file mode 100644 index 000000000000..8e1d7f9ffa0b --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h @@ -0,0 +1,44 @@ +/* + * RSS and Classifier definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVPP2_CLS_H_ +#define _MVPP2_CLS_H_ + +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 +#define MVPP2_CLS_RX_QUEUES 256 + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 + +struct mvpp2_cls_flow_entry { + u32 index; + u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + u32 lkpid; + u32 way; + u32 data; +}; + +void mvpp22_init_rss(struct mvpp2_port *port); + +void mvpp2_cls_init(struct mvpp2 *priv); + +void mvpp2_cls_port_config(struct mvpp2_port *port); + +void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port); + +#endif diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c new file mode 100644 index 000000000000..0319ed9ef8b8 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -0,0 +1,5281 @@ +/* + * Driver for Marvell PPv2 network controller for Armada 375 SoC. + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/acpi.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <linux/inetdevice.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/mfd/syscon.h> +#include <linux/interrupt.h> +#include <linux/cpumask.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/phy/phy.h> +#include <linux/clk.h> +#include <linux/hrtimer.h> +#include <linux/ktime.h> +#include <linux/regmap.h> +#include <uapi/linux/ppp_defs.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" +#include "mvpp2_cls.h" + +enum mvpp2_bm_pool_log_num { + MVPP2_BM_SHORT, + MVPP2_BM_LONG, + MVPP2_BM_JUMBO, + MVPP2_BM_POOLS_NUM +}; + +static struct { + int pkt_size; + int buf_num; +} mvpp2_pools[MVPP2_BM_POOLS_NUM]; + +/* The prototype is added here to be used in start_dev when using ACPI. This + * will be removed once phylink is used for all modes (dt+ACPI).
+ */ +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state); + +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 + +static int queue_mode = MVPP2_QDIST_SINGLE_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); + +/* Utility/helper methods */ + +void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) +{ + writel(data, priv->swth_base[0] + offset); +} + +u32 mvpp2_read(struct mvpp2 *priv, u32 offset) +{ + return readl(priv->swth_base[0] + offset); +} + +u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) +{ + return readl_relaxed(priv->swth_base[0] + offset); +} +/* These accessors should be used to access: + * + * - per-CPU registers, where each CPU has its own copy of the + * register. + * + * MVPP2_BM_VIRT_ALLOC_REG + * MVPP2_BM_ADDR_HIGH_ALLOC + * MVPP22_BM_ADDR_HIGH_RLS_REG + * MVPP2_BM_VIRT_RLS_REG + * MVPP2_ISR_RX_TX_CAUSE_REG + * MVPP2_ISR_RX_TX_MASK_REG + * MVPP2_TXQ_NUM_REG + * MVPP2_AGGR_TXQ_UPDATE_REG + * MVPP2_TXQ_RSVD_REQ_REG + * MVPP2_TXQ_RSVD_RSLT_REG + * MVPP2_TXQ_SENT_REG + * MVPP2_RXQ_NUM_REG + * + * - global registers that must be accessed through a specific CPU + * window, because they are related to an access to a per-CPU + * register + * + * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) + * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG) + * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG) + * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG) + * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) + */ +void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel(data, priv->swth_base[cpu] + offset); +} + +u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl(priv->swth_base[cpu] + offset); +} + +void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + u32 offset, u32 data) +{ + writel_relaxed(data, priv->swth_base[cpu] + offset); +} + +static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + u32 offset) +{ + return readl_relaxed(priv->swth_base[cpu] + offset); +} + +static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.buf_dma_addr; + else + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; +} + +static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + dma_addr_t dma_addr) +{ + dma_addr_t addr, offset; + + addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; + offset = dma_addr & MVPP2_TX_DESC_ALIGN; + + if (port->priv->hw_version == MVPP21) { + tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.packet_offset = offset; + } else { + u64 val = (u64)addr; + + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; + tx_desc->pp22.buf_dma_addr_ptp |= val; + tx_desc->pp22.packet_offset = offset; + } +} + +static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ +
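+ /* PPv2.1 and PPv2.2 lay their descriptors out differently, so every + * descriptor field is read and written through a small accessor like + * this one. + */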
if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.data_size; + else + return tx_desc->pp22.data_size; +} + +static void mvpp2_txdesc_size_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + size_t size) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.data_size = size; + else + tx_desc->pp22.data_size = size; +} + +static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int txq) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.phys_txq = txq; + else + tx_desc->pp22.phys_txq = txq; +} + +static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc, + unsigned int command) +{ + if (port->priv->hw_version == MVPP21) + tx_desc->pp21.command = command; + else + tx_desc->pp22.command = command; +} + +static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, + struct mvpp2_tx_desc *tx_desc) +{ + if (port->priv->hw_version == MVPP21) + return tx_desc->pp21.packet_offset; + else + return tx_desc->pp22.packet_offset; +} + +static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_dma_addr; + else + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; +} + +static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.buf_cookie; + else + return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; +} + +static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.data_size; + else + return rx_desc->pp22.data_size; +} + +static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + if (port->priv->hw_version == MVPP21) + return rx_desc->pp21.status; + else + return rx_desc->pp22.status; +} + +static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) +{ + txq_pcpu->txq_get_index++; + if (txq_pcpu->txq_get_index == txq_pcpu->size) + txq_pcpu->txq_get_index = 0; +} + +static void mvpp2_txq_inc_put(struct mvpp2_port *port, + struct mvpp2_txq_pcpu *txq_pcpu, + struct sk_buff *skb, + struct mvpp2_tx_desc *tx_desc) +{ + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_put_index; + tx_buf->skb = skb; + tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); + tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + + mvpp2_txdesc_offset_get(port, tx_desc); + txq_pcpu->txq_put_index++; + if (txq_pcpu->txq_put_index == txq_pcpu->size) + txq_pcpu->txq_put_index = 0; +} + +/* Get number of physical egress port */ +static inline int mvpp2_egress_port(struct mvpp2_port *port) +{ + return MVPP2_MAX_TCONT + port->id; +} + +/* Get number of physical TXQ */ +static inline int mvpp2_txq_phys(int port, int txq) +{ + return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; +} + +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(pool->frag_size); + else + return kmalloc(pool->frag_size, GFP_ATOMIC); +} + +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) +{ + if (likely(pool->frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); +} + +/* Buffer Manager configuration routines */ + +/* Create pool */ +static int mvpp2_bm_pool_create(struct platform_device *pdev, + struct mvpp2 *priv, + 
struct mvpp2_bm_pool *bm_pool, int size) +{ + u32 val; + + /* Number of buffer pointers must be a multiple of 16, as per + * hardware constraints + */ + if (!IS_ALIGNED(size, 16)) + return -EINVAL; + + /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16 + * bytes per buffer pointer + */ + if (priv->hw_version == MVPP21) + bm_pool->size_bytes = 2 * sizeof(u32) * size; + else + bm_pool->size_bytes = 2 * sizeof(u64) * size; + + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes, + &bm_pool->dma_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, + MVPP2_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr, bm_pool->dma_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + lower_32_bits(bm_pool->dma_addr)); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_START_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + bm_pool->size = size; + bm_pool->pkt_size = 0; + bm_pool->buf_num = 0; + + return 0; +} + +/* Set pool buffer size */ +static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + int buf_size) +{ + u32 val; + + bm_pool->buf_size = buf_size; + + val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); + mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); +} + +static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *dma_addr, + phys_addr_t *phys_addr) +{ + int cpu = get_cpu(); + + *dma_addr = mvpp2_percpu_read(priv, cpu, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + + if (priv->hw_version == MVPP22) { + u32 val; + u32 dma_addr_highbits, phys_addr_highbits; + + val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); + phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> + MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; + + if (sizeof(dma_addr_t) == 8) + *dma_addr |= (u64)dma_addr_highbits << 32; + + if (sizeof(phys_addr_t) == 8) + *phys_addr |= (u64)phys_addr_highbits << 32; + } + + put_cpu(); +} + +/* Free all buffers from the pool */ +static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + int i; + + if (buf_num > bm_pool->buf_num) { + WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n", + bm_pool->id, buf_num); + buf_num = bm_pool->buf_num; + } + + for (i = 0; i < buf_num; i++) { + dma_addr_t buf_dma_addr; + phys_addr_t buf_phys_addr; + void *data; + + mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, + &buf_dma_addr, &buf_phys_addr); + + dma_unmap_single(dev, buf_dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + + data = (void *)phys_to_virt(buf_phys_addr); + if (!data) + break; + + mvpp2_frag_free(bm_pool, data); + } + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->buf_num -= i; +} + +/* Check number of buffers in BM pool */ +static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +{ + int buf_num = 0; + + buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) & + MVPP22_BM_POOL_PTRS_NUM_MASK; + buf_num += 
mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) & + MVPP2_BM_BPPI_PTR_NUM_MASK; + + /* HW has one buffer ready which is not reflected in the counters */ + if (buf_num) + buf_num += 1; + + return buf_num; +} + +/* Cleanup pool */ +static int mvpp2_bm_pool_destroy(struct platform_device *pdev, + struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) +{ + int buf_num; + u32 val; + + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num); + + /* Check buffer counters after free */ + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); + if (buf_num) { + WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n", + bm_pool->id, bm_pool->buf_num); + return 0; + } + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_STOP_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + dma_free_coherent(&pdev->dev, bm_pool->size_bytes, + bm_pool->virt_addr, + bm_pool->dma_addr); + return 0; +} + +static int mvpp2_bm_pools_init(struct platform_device *pdev, + struct mvpp2 *priv) +{ + int i, err, size; + struct mvpp2_bm_pool *bm_pool; + + /* Create all pools with maximum size */ + size = MVPP2_BM_POOL_SIZE_MAX; + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size); + if (err) + goto err_unroll_pools; + mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); + } + return 0; + +err_unroll_pools: + dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); + for (i = i - 1; i >= 0; i--) + mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]); + return err; +} + +static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int i, err; + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + /* Mask BM all interrupts */ + mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); + /* Clear BM cause register */ + mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); + } + + /* Allocate and initialize BM pools */ + priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM, + sizeof(*priv->bm_pools), GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + err = mvpp2_bm_pools_init(pdev, priv); + if (err < 0) + return err; + return 0; +} + +static void mvpp2_setup_bm_pool(void) +{ + /* Short pool */ + mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM; + mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE; + + /* Long pool */ + mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM; + mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE; + + /* Jumbo pool */ + mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM; + mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE; +} + +/* Attach long pool to rxq */ +static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, + int lrxq, int long_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask = MVPP21_RXQ_POOL_LONG_MASK; + else + mask = MVPP22_RXQ_POOL_LONG_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Attach short pool to rxq */ +static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, + int lrxq, int short_pool) +{ + u32 val, mask; + int prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq]->id; + + if (port->priv->hw_version == MVPP21) + mask 
= MVPP21_RXQ_POOL_SHORT_MASK; + else + mask = MVPP22_RXQ_POOL_SHORT_MASK; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~mask; + val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +static void *mvpp2_buf_alloc(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, + dma_addr_t *buf_dma_addr, + phys_addr_t *buf_phys_addr, + gfp_t gfp_mask) +{ + dma_addr_t dma_addr; + void *data; + + data = mvpp2_frag_alloc(bm_pool); + if (!data) + return NULL; + + dma_addr = dma_map_single(port->dev->dev.parent, data, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_frag_free(bm_pool, data); + return NULL; + } + *buf_dma_addr = dma_addr; + *buf_phys_addr = virt_to_phys(data); + + return data; +} + +/* Release buffer to BM */ +static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, + dma_addr_t buf_dma_addr, + phys_addr_t buf_phys_addr) +{ + int cpu = get_cpu(); + + if (port->priv->hw_version == MVPP22) { + u32 val = 0; + + if (sizeof(dma_addr_t) == 8) + val |= upper_32_bits(buf_dma_addr) & + MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; + + if (sizeof(phys_addr_t) == 8) + val |= (upper_32_bits(buf_phys_addr) + << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & + MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; + + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP22_BM_ADDR_HIGH_RLS_REG, val); + } + + /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply + * returned in the "cookie" field of the RX + * descriptor. Instead of storing the virtual address, we + * store the physical address + */ + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); + mvpp2_percpu_write_relaxed(port->priv, cpu, + MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + put_cpu(); +} + +/* Allocate buffers for the pool */ +static int mvpp2_bm_bufs_add(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int buf_num) +{ + int i, buf_size, total_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + void *buf; + + buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size); + total_size = MVPP2_RX_TOTAL_SIZE(buf_size); + + if (buf_num < 0 || + (buf_num + bm_pool->buf_num > bm_pool->size)) { + netdev_err(port->dev, + "cannot allocate %d buffers for pool %d\n", + buf_num, bm_pool->id); + return 0; + } + + for (i = 0; i < buf_num; i++) { + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, + &phys_addr, GFP_KERNEL); + if (!buf) + break; + + mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, + phys_addr); + } + + /* Update BM driver with number of buffers added to pool */ + bm_pool->buf_num += i; + + netdev_dbg(port->dev, + "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n", + bm_pool->id, bm_pool->pkt_size, buf_size, total_size); + + netdev_dbg(port->dev, + "pool %d: %d of %d buffers added\n", + bm_pool->id, i, buf_num); + return i; +} + +/* Notify the driver that the BM pool is being used as a specific type and + * return the pool pointer on success + */ +static struct mvpp2_bm_pool * +mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size) +{ + struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; + int num; + + if (pool >= MVPP2_BM_POOLS_NUM) { + netdev_err(port->dev, "Invalid pool %d\n", pool); + return NULL; + } + + /* Allocate buffers in case BM pool is used as long pool, but packet + * size doesn't match MTU or BM pool hasn't been used yet + */ + if (new_pool->pkt_size == 0) { + int pkts_num; + + /* Set default buffer
number or free all the buffers in case + * the pool is not empty + */ + pkts_num = new_pool->buf_num; + if (pkts_num == 0) + pkts_num = mvpp2_pools[pool].buf_num; + else + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool, pkts_num); + + new_pool->pkt_size = pkt_size; + new_pool->frag_size = + SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) + + MVPP2_SKB_SHINFO_SIZE; + + /* Allocate buffers for this pool */ + num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); + if (num != pkts_num) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, pkts_num); + return NULL; + } + } + + mvpp2_bm_pool_bufsize_set(port->priv, new_pool, + MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); + + return new_pool; +} + +/* Initialize pools for swf */ +static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) +{ + int rxq; + enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool; + + /* If port pkt_size is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) { + long_log_pool = MVPP2_BM_JUMBO; + short_log_pool = MVPP2_BM_LONG; + } else { + long_log_pool = MVPP2_BM_LONG; + short_log_pool = MVPP2_BM_SHORT; + } + + if (!port->pool_long) { + port->pool_long = + mvpp2_bm_pool_use(port, long_log_pool, + mvpp2_pools[long_log_pool].pkt_size); + if (!port->pool_long) + return -ENOMEM; + + port->pool_long->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); + } + + if (!port->pool_short) { + port->pool_short = + mvpp2_bm_pool_use(port, short_log_pool, + mvpp2_pools[short_log_pool].pkt_size); + if (!port->pool_short) + return -ENOMEM; + + port->pool_short->port_map |= BIT(port->id); + + for (rxq = 0; rxq < port->nrxqs; rxq++) + mvpp2_rxq_short_pool_set(port, rxq, + port->pool_short->id); + } + + return 0; +} + +static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + enum mvpp2_bm_pool_log_num new_long_pool; + int pkt_size = MVPP2_RX_PKT_SIZE(mtu); + + /* If port MTU is higher than 1518B: + * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool + * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool + */ + if (pkt_size > MVPP2_BM_LONG_PKT_SIZE) + new_long_pool = MVPP2_BM_JUMBO; + else + new_long_pool = MVPP2_BM_LONG; + + if (new_long_pool != port->pool_long->id) { + /* Remove port from old short & long pool */ + port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id, + port->pool_long->pkt_size); + port->pool_long->port_map &= ~BIT(port->id); + port->pool_long = NULL; + + port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id, + port->pool_short->pkt_size); + port->pool_short->port_map &= ~BIT(port->id); + port->pool_short = NULL; + + port->pkt_size = pkt_size; + + /* Add port to new short & long pool */ + mvpp2_swf_bm_pool_init(port); + + /* Update L4 checksum when jumbo enable/disable on port */ + if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + } else { + dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + } + } + + dev->mtu = mtu; + dev->wanted_features = dev->features; + + netdev_update_features(dev); + return 0; +} + +static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) +{ + int i, 
sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) +{ + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +/* Mask the current CPU's Rx/Tx interrupts + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_mask(void *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); +} + +/* Unmask the current CPU's Rx/Tx interrupts. + * Called by on_each_cpu(), guaranteed to run with migration disabled, + * using smp_processor_id() is OK. + */ +static void mvpp2_interrupts_unmask(void *arg) +{ + struct mvpp2_port *port = arg; + u32 val; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_percpu_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } +} + +/* Port configuration routines */ + +static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; + else if (port->gop_id == 3) + val |= GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); +} + +static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | + GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + if (port->gop_id > 1) { + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val &= ~GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); + } +} + +static 
void mvpp22_gop_init_10gkr(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + u32 val; + + /* XPCS */ + val = readl(xpcs + MVPP22_XPCS_CFG0); + val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | + MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); + val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); + writel(val, xpcs + MVPP22_XPCS_CFG0); + + /* MPCS */ + val = readl(mpcs + MVPP22_MPCS_CTRL); + val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; + writel(val, mpcs + MVPP22_MPCS_CTRL); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | + MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); +} + +static int mvpp22_gop_init(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + if (!priv->sysctrl_base) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->gop_id == 0) + goto invalid_conf; + mvpp22_gop_init_rgmii(port); + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + mvpp22_gop_init_sgmii(port); + break; + case PHY_INTERFACE_MODE_10GKR: + if (port->gop_id != 0) + goto invalid_conf; + mvpp22_gop_init_10gkr(port); + break; + default: + goto unsupported_conf; + } + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); + val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | + GENCONF_PORT_CTRL1_EN(port->gop_id); + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); + val |= GENCONF_SOFT_RESET1_GOP; + regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); + +unsupported_conf: + return 0; + +invalid_conf: + netdev_err(port->dev, "Invalid port configuration\n"); + return -EINVAL; +} + +static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + /* Enable the GMAC link status irq for this port */ + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } + + if (port->gop_id == 0) { + /* Enable the XLG/GIG irqs for this port */ + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + val |= MVPP22_XLG_EXT_INT_MASK_XLG; + else + val |= MVPP22_XLG_EXT_INT_MASK_GIG; + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } +} + +static void mvpp22_gop_mask_irq(struct mvpp2_port *port) +{ + u32 val; + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); + val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | + MVPP22_XLG_EXT_INT_MASK_GIG); + writel(val, port->base + MVPP22_XLG_EXT_INT_MASK); + } + + if 
(phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); + val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); + } +} + +static void mvpp22_gop_setup_irq(struct mvpp2_port *port) +{ + u32 val; + + if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_MASK); + val |= MVPP22_GMAC_INT_MASK_LINK_STAT; + writel(val, port->base + MVPP22_GMAC_INT_MASK); + } + + if (port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_INT_MASK); + val |= MVPP22_XLG_INT_MASK_LINK; + writel(val, port->base + MVPP22_XLG_INT_MASK); + } + + mvpp22_gop_unmask_irq(port); +} + +/* Sets the PHY mode of the COMPHY (which configures the serdes lanes). + * + * The PHY mode used by the PPv2 driver comes from the network subsystem, while + * the one given to the COMPHY comes from the generic PHY subsystem. Hence they + * differ. + * + * The COMPHY configures the serdes lanes regardless of the actual use of the + * lanes by the physical layer. This is why configurations like + * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid. + */ +static int mvpp22_comphy_init(struct mvpp2_port *port) +{ + enum phy_mode mode; + int ret; + + if (!port->comphy) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + mode = PHY_MODE_SGMII; + break; + case PHY_INTERFACE_MODE_2500BASEX: + mode = PHY_MODE_2500SGMII; + break; + case PHY_INTERFACE_MODE_10GKR: + mode = PHY_MODE_10GKR; + break; + default: + return -EINVAL; + } + + ret = phy_set_mode(port->comphy, mode); + if (ret) + return ret; + + return phy_power_on(port->comphy); +} + +static void mvpp2_port_enable(struct mvpp2_port *port) +{ + u32 val; + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_PORT_EN | + MVPP22_XLG_CTRL0_MAC_RESET_DIS; + val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } +} + +static void mvpp2_port_disable(struct mvpp2_port *port) +{ + u32 val; + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_PORT_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + /* Disable & reset should be done separately */ + val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } +} + +/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ +static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base 
+ MVPP2_GMAC_CTRL_1_REG) & + ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +/* Configure loopback port */ +static void mvpp2_port_loopback_set(struct mvpp2_port *port, + const struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + + if (state->speed == 1000) + val |= MVPP2_GMAC_GMII_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) + val |= MVPP2_GMAC_PCS_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; + + writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); +} + +struct mvpp2_ethtool_counter { + unsigned int offset; + const char string[ETH_GSTRING_LEN]; + bool reg_is_64b; +}; + +static u64 mvpp2_read_count(struct mvpp2_port *port, + const struct mvpp2_ethtool_counter *counter) +{ + u64 val; + + val = readl(port->stats_base + counter->offset); + if (counter->reg_is_64b) + val += (u64)readl(port->stats_base + counter->offset + 4) << 32; + + return val; +} + +/* Software statistics and hardware statistics are, by design, incremented at + * different moments in the chain of packet processing. Incoming packets may be + * dropped after being counted by hardware but before reaching software + * statistics (most probably multicast packets), and in the opposite direction, + * during transmission, FCS bytes are added in between and TSO skbs are split + * with header bytes added. Hence, statistics gathered from userspace with + * ifconfig (software) and ethtool (hardware) cannot be compared. + */ +static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { + { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, + { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, + { MVPP2_MIB_FC_SENT, "fc_sent" }, + { MVPP2_MIB_FC_RCVD, "fc_received" }, + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, + { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, + { MVPP2_MIB_COLLISION, "collision" }, + { MVPP2_MIB_LATE_COLLISION, "late_collision" }, +}; + +static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) +
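+ /* ethtool expects one fixed-width ETH_GSTRING_LEN slot per counter */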
memcpy(data + i * ETH_GSTRING_LEN, + &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + } +} + +static void mvpp2_gather_hw_statistics(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, + stats_work); + u64 *pstats; + int i; + + mutex_lock(&port->gather_stats_lock); + + pstats = port->ethtool_stats; + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); + + /* No need to read the counters again right after this function if it + * was called asynchronously by the user (i.e. via ethtool). + */ + cancel_delayed_work(&port->stats_work); + queue_delayed_work(port->priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + mutex_unlock(&port->gather_stats_lock); +} + +static void mvpp2_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Update statistics for the given port, then take the lock to avoid + * concurrent accesses to the ethtool_stats structure during its copy. + */ + mvpp2_gather_hw_statistics(&port->stats_work.work); + + mutex_lock(&port->gather_stats_lock); + memcpy(data, port->ethtool_stats, + sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); + mutex_unlock(&port->gather_stats_lock); +} + +static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvpp2_ethtool_regs); + + return -EOPNOTSUPP; +} + +static void mvpp2_port_reset(struct mvpp2_port *port) +{ + u32 val; + unsigned int i; + + /* Read the GOP statistics to reset the hardware counters */ + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP2_GMAC_MAX_RX_SIZE_OFFS); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); +} + +/* Change maximum receive size of the port */ +static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; + val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); +} + +/* Set defaults to the MVPP2 port */ +static void mvpp2_defaults_set(struct mvpp2_port *port) +{ + int tx_port_num, val, queue, ptxq, lrxq; + + if (port->priv->hw_version == MVPP21) { + /* Update TX FIFO MIN Threshold */ + val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min.
TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + } + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, + tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Close bandwidth for all queues */ + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { + ptxq = mvpp2_txq_phys(port->id, queue); + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); + } + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum + */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, + port->priv->tclk / USEC_PER_SEC); + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); + val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); + val = MVPP2_TXP_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + + /* Set MaximumLowLatencyPacketSize value to 256 */ + mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), + MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | + MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); + + /* Enable Rx cache snoop */ + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_SNOOP_PKT_SIZE_MASK | + MVPP2_SNOOP_BUF_HDR_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } + + /* By default, mask all interrupts on all present CPUs */ + mvpp2_interrupts_disable(port); +} + +/* Enable/disable receiving packets */ +static void mvpp2_ingress_enable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val &= ~MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +static void mvpp2_ingress_disable(struct mvpp2_port *port) +{ + u32 val; + int lrxq, queue; + + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { + queue = port->rxqs[lrxq]->id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +/* Enable transmit via physical egress queue + * - HW starts to take descriptors from DRAM + */ +static void mvpp2_egress_enable(struct mvpp2_port *port) +{ + u32 qmap; + int queue; + int tx_port_num = mvpp2_egress_port(port); + + /* Enable all initialized TXs.
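+ * (A queue counts as initialized once its descriptor ring has been + * allocated, i.e. txq->descs != NULL; qmap collects one bit per such + * queue.)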
*/ + qmap = 0; + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + if (txq->descs) + qmap |= (1 << queue); + } + + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); +} + +/* Disable transmit via physical egress queue + * - HW doesn't take descriptors from DRAM + */ +static void mvpp2_egress_disable(struct mvpp2_port *port) +{ + u32 reg_data; + int delay; + int tx_port_num = mvpp2_egress_port(port); + + /* Issue stop command for active channels only */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & + MVPP2_TXP_SCHED_ENQ_MASK; + if (reg_data != 0) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, + (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); + + /* Wait for all Tx activity to terminate. */ + delay = 0; + do { + if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "Tx stop timed out, status=0x%08x\n", + reg_data); + break; + } + mdelay(1); + delay++; + + /* Check the port TX Command register to see that all + * Tx queues have stopped + */ + reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); + } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); +} + +/* Rx descriptors helper methods */ + +/* Get number of Rx descriptors occupied by received packets */ +static inline int +mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) +{ + u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); + + return val & MVPP2_RXQ_OCCUPIED_MASK; +} + +/* Update Rx queue status with the number of occupied and available + * Rx descriptor slots. + */ +static inline void +mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, + int used_count, int free_count) +{ + /* Decrement the number of used descriptors and increment the + * number of free descriptors. + */ + u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); + + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); +} + +/* Get pointer to next RX descriptor to be processed by SW */ +static inline struct mvpp2_rx_desc * +mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) +{ + int rx_desc = rxq->next_desc_to_proc; + + rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); + prefetch(rxq->descs + rxq->next_desc_to_proc); + return rxq->descs + rx_desc; +} + +/* Set rx queue offset */ +static void mvpp2_rxq_offset_set(struct mvpp2_port *port, + int prxq, int offset) +{ + u32 val; + + /* Convert offset from bytes to units of 32 bytes */ + offset = offset >> 5; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; + + /* Offset is in units of 32 bytes */ + val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & + MVPP2_RXQ_PACKET_OFFSET_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Tx descriptors helper methods */ + +/* Get pointer to next Tx descriptor to be processed (send) by HW */ +static struct mvpp2_tx_desc * +mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) +{ + int tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Update HW with number of aggregated Tx descriptors to be sent + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK.
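+ * (The single write of the pending count to MVPP2_AGGR_TXQ_UPDATE_REG + * below is the whole hand-off: the hardware owns those descriptors from + * then on.)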
+ */ +static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) +{ + /* aggregated access - relevant TXQ number is written in TX desc */ + mvpp2_percpu_write(port->priv, smp_processor_id(), + MVPP2_AGGR_TXQ_UPDATE_REG, pending); +} + +/* Check if there are enough free descriptors in aggregated txq. + * If not, update the number of occupied descriptors and repeat the check. + * + * Called only from mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + struct mvpp2_tx_queue *aggr_txq, int num) +{ + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { + /* Update number of occupied aggregated Tx descriptors */ + int cpu = smp_processor_id(); + u32 val = mvpp2_read_relaxed(priv, + MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + + aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; + + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) + return -ENOMEM; + } + return 0; +} + +/* Reserved Tx descriptors allocation request + * + * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called + * only by mvpp2_tx(), so migration is disabled, using + * smp_processor_id() is OK. + */ +static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, int num) +{ + u32 val; + int cpu = smp_processor_id(); + + val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; + mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + + val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + + return val & MVPP2_TXQ_RSVD_RSLT_MASK; +} + +/* Check if there are enough reserved descriptors for transmission. + * If not, request chunk of reserved descriptors and check again. + */ +static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int num) +{ + int req, cpu, desc_count; + + if (txq_pcpu->reserved_num >= num) + return 0; + + /* Not enough descriptors reserved! Update the reserved descriptor + * count and check again. + */ + + desc_count = 0; + /* Compute total of used descriptors */ + for_each_present_cpu(cpu) { + struct mvpp2_txq_pcpu *txq_pcpu_aux; + + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + desc_count += txq_pcpu_aux->count; + desc_count += txq_pcpu_aux->reserved_num; + } + + req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num); + desc_count += req; + + if (desc_count > + (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + return -ENOMEM; + + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + + /* OK, the descriptor could have been updated: check again. */ + if (txq_pcpu->reserved_num < num) + return -ENOMEM; + return 0; +} + +/* Release the last allocated Tx descriptor. Useful to handle DMA + * mapping failures in the Tx path. 
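+ * It simply steps next_desc_to_proc back by one, wrapping around when + * the index is already 0.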
+ */ +static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set Tx descriptors fields relevant for CSUM calculation */ +static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto, + int ip_hdr_len, int l4_proto) +{ + u32 command; + + /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type required only for checksum calculation + */ + command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); + command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); + command |= MVPP2_TXD_IP_CSUM_DISABLE; + + if (l3_proto == swab16(ETH_P_IP)) { + command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ + command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ + } else { + command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ + } + + if (l4_proto == IPPROTO_TCP) { + command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else if (l4_proto == IPPROTO_UDP) { + command |= MVPP2_TXD_L4_UDP; /* enable UDP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else { + command |= MVPP2_TXD_L4_CSUM_NOT; + } + + return command; +} + +/* Get number of sent descriptors and decrement counter. + * The number of sent descriptors is returned. + * Per-CPU access + * + * Called only from mvpp2_txq_done(), called from mvpp2_tx() + * (migration disabled) and from the TX completion tasklet (migration + * disabled) so using smp_processor_id() is OK. + */ +static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + + /* Reading status reg resets transmitted descriptor counter */ + val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(txq->id)); + + return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> + MVPP2_TRANSMITTED_COUNT_OFFSET; +} + +/* Called through on_each_cpu(), so runs on all CPUs, with migration + * disabled, therefore using smp_processor_id() is OK. 
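+ * + * A minimal model of the read-to-clear counter behaviour this relies on + * (an illustrative sketch only, not driver code): + */ +#if 0 +static u32 read_to_clear(u32 *counter) +{ + u32 val = *counter; /* the read returns the current count... */ + + *counter = 0; /* ...and zeroes the counter as a side effect */ + return val; +} +#endif +/*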
+ */ +static void mvpp2_txq_sent_counter_clear(void *arg) +{ + struct mvpp2_port *port = arg; + int queue; + + for (queue = 0; queue < port->ntxqs; queue++) { + int id = port->txqs[queue]->id; + + mvpp2_percpu_read(port->priv, smp_processor_id(), + MVPP2_TXQ_SENT_REG(id)); + } +} + +/* Set max sizes for Tx queues */ +static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) +{ + u32 val, size, mtu; + int txq, tx_port_num; + + mtu = port->pkt_size * 8; + if (mtu > MVPP2_TXP_MTU_MAX) + mtu = MVPP2_TXP_MTU_MAX; + + /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ + mtu = 3 * mtu; + + /* Indirect access to registers */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + /* Set MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); + val &= ~MVPP2_TXP_MTU_MAX; + val |= mtu; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); + + /* TXP token size and all TXQs token size must be larger than the MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); + size = val & MVPP2_TXP_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + } + + for (txq = 0; txq < port->ntxqs; txq++) { + val = mvpp2_read(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); + size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; + + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), + val); + } + } +} + +/* Set the number of packets that will be received before an Rx interrupt + * is generated by HW. + */ +static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int cpu = get_cpu(); + + if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) + rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; + + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + rxq->pkts_coal); + + put_cpu(); +} + +/* For some reason in the LSP this is done on each CPU. Why? */ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + int cpu = get_cpu(); + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + + put_cpu(); +} + +static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) +{ + u64 tmp = (u64)clk_hz * usec; + + do_div(tmp, USEC_PER_SEC); + + return tmp > U32_MAX ? U32_MAX : tmp; +} + +static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz) +{ + u64 tmp = (u64)cycles * USEC_PER_SEC; + + do_div(tmp, clk_hz); + + return tmp > U32_MAX ?
U32_MAX : tmp; +} + +/* Set the time delay in usec before Rx interrupt */ +static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + + if (val > MVPP2_MAX_ISR_RX_THRESHOLD) { + rxq->time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(rxq->time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); +} + +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + +/* Free Tx queue skbuffs */ +static void mvpp2_txq_bufs_free(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu, int num) +{ + int i; + + for (i = 0; i < num; i++) { + struct mvpp2_txq_pcpu_buf *tx_buf = + txq_pcpu->buffs + txq_pcpu->txq_get_index; + + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) + dma_unmap_single(port->dev->dev.parent, tx_buf->dma, + tx_buf->size, DMA_TO_DEVICE); + if (tx_buf->skb) + dev_kfree_skb_any(tx_buf->skb); + + mvpp2_txq_inc_get(txq_pcpu); + } +} + +static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->rxqs[queue]; +} + +static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, + u32 cause) +{ + int queue = fls(cause) - 1; + + return port->txqs[queue]; +} + +/* Handle end of transmission */ +static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); + int tx_done; + + if (txq_pcpu->cpu != smp_processor_id()) + netdev_err(port->dev, "wrong cpu on the end of Tx processing\n"); + + tx_done = mvpp2_txq_sent_desc_proc(port, txq); + if (!tx_done) + return; + mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done); + + txq_pcpu->count -= tx_done; + + if (netif_tx_queue_stopped(nq)) + if (txq_pcpu->count <= txq_pcpu->wake_threshold) + netif_tx_wake_queue(nq); +} + +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + int cpu) +{ + struct mvpp2_tx_queue *txq; + struct mvpp2_txq_pcpu *txq_pcpu; + unsigned int tx_todo = 0; + + while (cause) { + txq = mvpp2_get_tx_queue(port, cause); + if (!txq) + break; + + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + + if (txq_pcpu->count) { + mvpp2_txq_done(port, txq, txq_pcpu); + tx_todo += txq_pcpu->count; + } + + cause &= ~(1 << txq->log_id); + } + return tx_todo; +} + +/* Rx/Tx queue initialization/cleanup methods */ + +/* Allocate and initialize descriptors for aggr TXQ */ +static int mvpp2_aggr_txq_init(struct platform_device *pdev, + struct mvpp2_tx_queue *aggr_txq, int cpu, + struct mvpp2 *priv) +{ + u32 txq_dma; + + /* Allocate memory for TX descriptors */ + aggr_txq->descs = dma_zalloc_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + &aggr_txq->descs_dma, GFP_KERNEL); + if (!aggr_txq->descs) + return -ENOMEM; + + aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; + + /* Aggr TXQ no reset WA */ + 
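+ /* The aggregated queue cannot be reset, so resume from whatever + * descriptor index the hardware last saw instead of assuming 0. + */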
aggr_txq->next_desc_to_proc = mvpp2_read(priv, + MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + + /* Set Tx descriptors queue starting address - indirect + * access + */ + if (priv->hw_version == MVPP21) + txq_dma = aggr_txq->descs_dma; + else + txq_dma = aggr_txq->descs_dma >> + MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; + + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + MVPP2_AGGR_TXQ_SIZE); + + return 0; +} + +/* Create a specified Rx queue */ +static int mvpp2_rxq_init(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + u32 rxq_dma; + int cpu; + + rxq->size = port->rx_ring_size; + + /* Allocate memory for RX descriptors */ + rxq->descs = dma_alloc_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + &rxq->descs_dma, GFP_KERNEL); + if (!rxq->descs) + return -ENOMEM; + + rxq->last_desc = rxq->size - 1; + + /* Zero occupied and non-occupied counters - direct access */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + + /* Set Rx descriptors queue starting address - indirect access */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + if (port->priv->hw_version == MVPP21) + rxq_dma = rxq->descs_dma; + else + rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + put_cpu(); + + /* Set Offset */ + mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); + + /* Set coalescing pkts and time */ + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + + /* Add number of descriptors ready for receiving packets */ + mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); + + return 0; +} + +/* Push packets received by the RXQ to BM pool */ +static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int rx_received, i; + + rx_received = mvpp2_rxq_received(port, rxq->id); + if (!rx_received) + return; + + for (i = 0; i < rx_received; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; + + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + + mvpp2_bm_pool_put(port, pool, + mvpp2_rxdesc_dma_addr_get(port, rx_desc), + mvpp2_rxdesc_cookie_get(port, rx_desc)); + } + mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); +} + +/* Cleanup Rx queue */ +static void mvpp2_rxq_deinit(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + int cpu; + + mvpp2_rxq_drop_pkts(port, rxq); + + if (rxq->descs) + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, + rxq->descs_dma); + + rxq->descs = NULL; + rxq->last_desc = 0; + rxq->next_desc_to_proc = 0; + rxq->descs_dma = 0; + + /* Clear Rx descriptors queue starting address and size; + * free descriptor number + */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Create and initialize a Tx queue */ +static int mvpp2_txq_init(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + u32 val; + int cpu, desc, desc_per_txq, tx_port_num; + struct mvpp2_txq_pcpu *txq_pcpu; + + 
txq->size = port->tx_ring_size; + + /* Allocate memory for Tx descriptors */ + txq->descs = dma_alloc_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + &txq->descs_dma, GFP_KERNEL); + if (!txq->descs) + return -ENOMEM; + + txq->last_desc = txq->size - 1; + + /* Set Tx descriptors queue starting address - indirect access */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + txq->descs_dma); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + txq->size & MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val &= ~MVPP2_TXQ_PENDING_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + + /* Calculate base address in prefetch buffer. We reserve 16 descriptors + * for each existing TXQ. + * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT; + * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS. + */ + desc_per_txq = 16; + desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + + (txq->log_id * desc_per_txq); + + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu(); + + /* WRR / EJP configuration - indirect access */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); + val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); + + val = MVPP2_TXQ_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), + val); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->size = txq->size; + txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, + sizeof(*txq_pcpu->buffs), + GFP_KERNEL); + if (!txq_pcpu->buffs) + return -ENOMEM; + + txq_pcpu->count = 0; + txq_pcpu->reserved_num = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + txq_pcpu->tso_headers = NULL; + + txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; + txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; + + txq_pcpu->tso_headers = + dma_alloc_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + &txq_pcpu->tso_headers_dma, + GFP_KERNEL); + if (!txq_pcpu->tso_headers) + return -ENOMEM; + } + + return 0; +} + +/* Free allocated TXQ resources */ +static void mvpp2_txq_deinit(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int cpu; + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + kfree(txq_pcpu->buffs); + + if (txq_pcpu->tso_headers) + dma_free_coherent(port->dev->dev.parent, + txq_pcpu->size * TSO_HEADER_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); + + txq_pcpu->tso_headers = NULL; + } + + if (txq->descs) + dma_free_coherent(port->dev->dev.parent, + txq->size * MVPP2_DESC_ALIGNED_SIZE, + txq->descs, txq->descs_dma); + + txq->descs = NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_dma = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvpp2_write(port->priv, 
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + + /* Clear Tx descriptors queue starting address and size */ + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); +} + +/* Clean up a Tx queue */ +static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu; + int delay, pending, cpu; + u32 val; + + cpu = get_cpu(); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + val |= MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + + /* The NAPI queue has been stopped, so wait for all packets + * to be transmitted. + */ + delay = 0; + do { + if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { + netdev_warn(port->dev, + "port %d: cleaning queue %d timed out\n", + port->id, txq->log_id); + break; + } + mdelay(1); + delay++; + + pending = mvpp2_percpu_read(port->priv, cpu, + MVPP2_TXQ_PENDING_REG); + pending &= MVPP2_TXQ_PENDING_MASK; + } while (pending); + + val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu(); + + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + + /* Release all packets */ + mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); + + /* Reset queue */ + txq_pcpu->count = 0; + txq_pcpu->txq_put_index = 0; + txq_pcpu->txq_get_index = 0; + } +} + +/* Cleanup all Tx queues */ +static void mvpp2_cleanup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue; + u32 val; + + val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); + + /* Reset Tx ports and delete Tx queues */ + val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_txq_clean(port, txq); + mvpp2_txq_deinit(port, txq); + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + + val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); + mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); +} + +/* Cleanup all Rx queues */ +static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) +{ + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) + mvpp2_rxq_deinit(port, port->rxqs[queue]); +} + +/* Init all Rx queues for port */ +static int mvpp2_setup_rxqs(struct mvpp2_port *port) +{ + int queue, err; + + for (queue = 0; queue < port->nrxqs; queue++) { + err = mvpp2_rxq_init(port, port->rxqs[queue]); + if (err) + goto err_cleanup; + } + return 0; + +err_cleanup: + mvpp2_cleanup_rxqs(port); + return err; +} + +/* Init all Tx queues for port */ +static int mvpp2_setup_txqs(struct mvpp2_port *port) +{ + struct mvpp2_tx_queue *txq; + int queue, err; + + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + err = mvpp2_txq_init(port, txq); + if (err) + goto err_cleanup; + } + + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); + return 0; + +err_cleanup: + mvpp2_cleanup_txqs(port); + return err; +} + +/* The callback for per-port interrupt */ +static irqreturn_t mvpp2_isr(int irq, void *dev_id) +{ + struct mvpp2_queue_vector *qv = dev_id; + + 
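/* Mask this queue vector's interrupt and hand the remaining work + * to NAPI; the interrupt is re-enabled from mvpp2_poll() once the + * pending Rx/Tx work fits in the poll budget. + */ + 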
mvpp2_qvec_interrupt_disable(qv); + + napi_schedule(&qv->napi); + + return IRQ_HANDLED; +} + +/* Per-port interrupt for link status changes */ +static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) +{ + struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct net_device *dev = port->dev; + bool event = false, link = false; + u32 val; + + mvpp22_gop_mask_irq(port); + + if (port->gop_id == 0 && + port->phy_interface == PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP22_XLG_INT_STAT); + if (val & MVPP22_XLG_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP22_XLG_STATUS); + if (val & MVPP22_XLG_STATUS_LINK_UP) + link = true; + } + } else if (phy_interface_mode_is_rgmii(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII || + port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || + port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + val = readl(port->base + MVPP22_GMAC_INT_STAT); + if (val & MVPP22_GMAC_INT_STAT_LINK) { + event = true; + val = readl(port->base + MVPP2_GMAC_STATUS0); + if (val & MVPP2_GMAC_STATUS0_LINK_UP) + link = true; + } + } + + if (port->phylink) { + phylink_mac_change(port->phylink, link); + goto handled; + } + + if (!netif_running(dev) || !event) + goto handled; + + if (link) { + mvpp2_interrupts_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } else { + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + mvpp2_ingress_disable(port); + mvpp2_egress_disable(port); + + mvpp2_interrupts_disable(port); + } + +handled: + mvpp22_gop_unmask_irq(port); + return IRQ_HANDLED; +} + +static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu) +{ + ktime_t interval; + + if (!port_pcpu->timer_scheduled) { + port_pcpu->timer_scheduled = true; + interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS; + hrtimer_start(&port_pcpu->tx_done_timer, interval, + HRTIMER_MODE_REL_PINNED); + } +} + +static void mvpp2_tx_proc_cb(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + unsigned int tx_todo, cause; + + if (!netif_running(dev)) + return; + port_pcpu->timer_scheduled = false; + + /* Process all the Tx queues */ + cause = (1 << port->ntxqs) - 1; + tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + + /* Set the timer in case not all the packets were processed */ + if (tx_todo) + mvpp2_timer_set(port_pcpu); +} + +static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer) +{ + struct mvpp2_port_pcpu *port_pcpu = container_of(timer, + struct mvpp2_port_pcpu, + tx_done_timer); + + tasklet_schedule(&port_pcpu->tx_done_tasklet); + + return HRTIMER_NORESTART; +} + +/* Main RX/TX processing routines */ + +/* Display more error info */ +static void mvpp2_rx_error(struct mvpp2_port *port, + struct mvpp2_rx_desc *rx_desc) +{ + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); + char *err_str = NULL; + + switch (status & MVPP2_RXD_ERR_CODE_MASK) { + case MVPP2_RXD_ERR_CRC: + err_str = "crc"; + break; + case MVPP2_RXD_ERR_OVERRUN: + err_str = "overrun"; + break; + case MVPP2_RXD_ERR_RESOURCE: + err_str = "resource"; + break; + } + if (err_str && net_ratelimit()) + netdev_err(port->dev, + "bad rx status %08x (%s error), size=%zu\n", + status, err_str, sz); +} + +/* Handle RX checksum offload */ +static void mvpp2_rx_csum(struct mvpp2_port *port, 
u32 status, + struct sk_buff *skb) +{ + if (((status & MVPP2_RXD_L3_IP4) && + !(status & MVPP2_RXD_IP4_HEADER_ERR)) || + (status & MVPP2_RXD_L3_IP6)) + if (((status & MVPP2_RXD_L4_UDP) || + (status & MVPP2_RXD_L4_TCP)) && + (status & MVPP2_RXD_L4_CSUM_OK)) { + skb->csum = 0; + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + + skb->ip_summed = CHECKSUM_NONE; +} + +/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ +static int mvpp2_rx_refill(struct mvpp2_port *port, + struct mvpp2_bm_pool *bm_pool, int pool) +{ + dma_addr_t dma_addr; + phys_addr_t phys_addr; + void *buf; + + /* No recycle or too many buffers are in use, so allocate a new skb */ + buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, + GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + + return 0; +} + +/* Handle Tx checksum */ +static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_PARTIAL) { + int ip_hdr_len = 0; + u8 l4_proto; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *ip4h = ip_hdr(skb); + + /* Calculate IPv4 checksum and L4 checksum */ + ip_hdr_len = ip4h->ihl; + l4_proto = ip4h->protocol; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /* Read l4_protocol from one of IPv6 extra headers */ + if (skb_network_header_len(skb) > 0) + ip_hdr_len = (skb_network_header_len(skb) >> 2); + l4_proto = ip6h->nexthdr; + } else { + return MVPP2_TXD_L4_CSUM_NOT; + } + + return mvpp2_txq_desc_csum(skb_network_offset(skb), + skb->protocol, ip_hdr_len, l4_proto); + } + + return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; +} + +/* Main Rx processing */ +static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) +{ + struct net_device *dev = port->dev; + int rx_received; + int rx_done = 0; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets and clamp the to-do */ + rx_received = mvpp2_rxq_received(port, rxq->id); + if (rx_todo > rx_received) + rx_todo = rx_received; + + while (rx_done < rx_todo) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + struct mvpp2_bm_pool *bm_pool; + struct sk_buff *skb; + unsigned int frag_size; + dma_addr_t dma_addr; + phys_addr_t phys_addr; + u32 rx_status; + int pool, rx_bytes, err; + void *data; + + rx_done++; + rx_status = mvpp2_rxdesc_status_get(port, rx_desc); + rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); + rx_bytes -= MVPP2_MH_SIZE; + dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); + phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); + data = (void *)phys_to_virt(phys_addr); + + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + bm_pool = &port->priv->bm_pools[pool]; + + /* In case of an error, release the requested buffer pointer + * to the Buffer Manager. This request process is controlled + * by the hardware, and the information about the buffer is + * carried in the RX descriptor.
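+ * + * For illustration, assuming the pool-id field sits at bit 16: a + * status word of 0x00020000 selects BM pool 2, i.e. + * + *   pool = (0x00020000 & MVPP2_RXD_BM_POOL_ID_MASK) + *          >> MVPP2_RXD_BM_POOL_ID_OFFS    (== 2) + * + * which is exactly how 'pool' was computed above.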
+ */ + if (rx_status & MVPP2_RXD_ERR_SUMMARY) { +err_drop_frame: + dev->stats.rx_errors++; + mvpp2_rx_error(port, rx_desc); + /* Return the buffer to the pool */ + mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); + continue; + } + + if (bm_pool->frag_size > PAGE_SIZE) + frag_size = 0; + else + frag_size = bm_pool->frag_size; + + skb = build_skb(data, frag_size); + if (!skb) { + netdev_warn(port->dev, "skb build failed\n"); + goto err_drop_frame; + } + + err = mvpp2_rx_refill(port, bm_pool, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + dma_unmap_single(dev->dev.parent, dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + skb->protocol = eth_type_trans(skb, dev); + mvpp2_rx_csum(port, rx_status, skb); + + napi_gro_receive(napi, skb); + } + + if (rcvd_pkts) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update Rx queue management counters */ + wmb(); + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); + + return rx_todo; +} + +static inline void +tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, + struct mvpp2_tx_desc *desc) +{ + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + + dma_addr_t buf_dma_addr = + mvpp2_txdesc_dma_addr_get(port, desc); + size_t buf_sz = + mvpp2_txdesc_size_get(port, desc); + if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) + dma_unmap_single(port->dev->dev.parent, buf_dma_addr, + buf_sz, DMA_TO_DEVICE); + mvpp2_txq_desc_put(txq); +} + +/* Handle tx fragmentation processing */ +static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_tx_queue *txq) +{ + struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + struct mvpp2_tx_desc *tx_desc; + int i; + dma_addr_t buf_dma_addr; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = page_address(frag->page.p) + frag->page_offset; + + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, frag->size); + + buf_dma_addr = dma_map_single(port->dev->dev.parent, addr, + frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) { + mvpp2_txq_desc_put(txq); + goto cleanup; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (i == (skb_shinfo(skb)->nr_frags - 1)) { + /* Last descriptor */ + mvpp2_txdesc_cmd_set(port, tx_desc, + MVPP2_TXD_L_DESC); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + } else { + /* Descriptor in the middle: Not First, Not Last */ + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + } + } + + return 0; +cleanup: + /* Release all descriptors that were used to map fragments of + * this packet, as well as the corresponding DMA mappings + */ + for (i = i - 1; i >= 0; i--) { + tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + + return -ENOMEM; +} + +static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, + struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int hdr_sz) +{ + struct mvpp2_port *port = netdev_priv(dev); 
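+ /* The header bytes themselves were already written by + * tso_build_hdr() into the per-CPU tso_headers coherent buffer; + * the descriptor built here only points at that slot, so no + * dma_map_single() call is needed for the header. + */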
+ struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); + + addr = txq_pcpu->tso_headers_dma + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); + + mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | + MVPP2_TXD_F_DESC | + MVPP2_TXD_PADDING_DISABLE); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); +} + +static inline int mvpp2_tso_put_data(struct sk_buff *skb, + struct net_device *dev, struct tso_t *tso, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int sz, bool left, bool last) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t buf_dma_addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, sz); + + buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + return -ENOMEM; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + if (!left) { + mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); + if (last) { + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + return 0; + } + } else { + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + } + + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + return 0; +} + +static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct tso_t tso; + int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); + int i, len, descs = 0; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, + tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + tso_count_descs(skb))) + return 0; + + tso_start(skb, &tso); + len = skb->len - hdr_sz; + while (len > 0) { + int left = min_t(int, skb_shinfo(skb)->gso_size, len); + char *hdr = txq_pcpu->tso_headers + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + + len -= left; + descs++; + + tso_build_hdr(skb, hdr, &tso, left, len == 0); + mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); + + while (left > 0) { + int sz = min_t(int, tso.size, left); + left -= sz; + descs++; + + if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, + txq_pcpu, sz, left, len == 0)) + goto release; + tso_build_data(skb, &tso, sz); + } + } + + return descs; + +release: + for (i = descs - 1; i >= 0; i--) { + struct mvpp2_tx_desc *tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + return 0; +} + +/* Main tx processing */ +static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_queue *txq, *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_desc *tx_desc; + dma_addr_t buf_dma_addr; + int frags = 0; + u16 txq_id; + u32 tx_cmd; + + txq_id = skb_get_queue_mapping(skb); + txq = port->txqs[txq_id]; + txq_pcpu = this_cpu_ptr(txq->pcpu); + aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + + if (skb_is_gso(skb)) { + frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); + goto out; + } + frags = skb_shinfo(skb)->nr_frags + 1; + + /* Check number of available descriptors */ + if 
(mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, + txq_pcpu, frags)) { + frags = 0; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb)); + + buf_dma_addr = dma_map_single(dev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + frags = 0; + goto out; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); + + tx_cmd = mvpp2_skb_tx_csum(port, skb); + + if (frags == 1) { + /* First and Last descriptor */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + } else { + /* First but not Last */ + tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + + /* Continue with other skb fragments */ + if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { + tx_desc_unmap_put(port, txq, tx_desc); + frags = 0; + } + } + +out: + if (frags > 0) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->count >= txq_pcpu->stop_threshold) + netif_tx_stop_queue(nq); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + /* Finalize TX processing */ + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); + + /* Set the timer in case not all frags were processed */ + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { + struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + + mvpp2_timer_set(port_pcpu); + } + + return NETDEV_TX_OK; +} + +static inline void mvpp2_cause_error(struct net_device *dev, int cause) +{ + if (cause & MVPP2_CAUSE_FCS_ERR_MASK) + netdev_err(dev, "FCS error\n"); + if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK) + netdev_err(dev, "rx fifo overrun error\n"); + if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK) + netdev_err(dev, "tx fifo underrun error\n"); +} + +static int mvpp2_poll(struct napi_struct *napi, int budget) +{ + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; + int rx_done = 0; + struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; + int cpu = smp_processor_id(); + + qv = container_of(napi, struct mvpp2_queue_vector, napi); + + /* Rx/Tx cause register + * + * Bits 0-15: each bit indicates received packets on the Rx queue + * (bit 0 is for Rx queue 0). + * + * Bits 16-23: each bit indicates transmitted packets on the Tx queue + * (bit 16 is for Tx queue 0). 
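+ * + * For example, a (hypothetical) cause value of 0x00030005 flags + * Rx queues 0 and 2 and Tx queues 0 and 1 as needing service; the + * Tx part is recovered below with + * + *   cause_tx = (cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) + *              >> MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET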
+ * + * Each CPU has its own Rx/Tx cause register + */ + cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); + + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + if (cause_misc) { + mvpp2_cause_error(port->dev, cause_misc); + + /* Clear the cause register */ + mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); + mvpp2_percpu_write(port->priv, cpu, + MVPP2_ISR_RX_TX_CAUSE_REG(port->id), + cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); + } + + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } + + /* Process RX packets */ + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx <<= qv->first_rxq; + cause_rx |= qv->pending_cause_rx; + while (cause_rx && budget > 0) { + int count; + struct mvpp2_rx_queue *rxq; + + rxq = mvpp2_get_rx_queue(port, cause_rx); + if (!rxq) + break; + + count = mvpp2_rx(port, napi, budget, rxq); + rx_done += count; + budget -= count; + if (budget > 0) { + /* Clear the bit associated to this Rx queue + * so that next iteration will continue from + * the next Rx queue. + */ + cause_rx &= ~(1 << rxq->logic_rxq); + } + } + + if (budget > 0) { + cause_rx = 0; + napi_complete_done(napi, rx_done); + + mvpp2_qvec_interrupt_enable(qv); + } + qv->pending_cause_rx = cause_rx; + return rx_done; +} + +static void mvpp22_mode_reconfigure(struct mvpp2_port *port) +{ + u32 ctrl3; + + /* comphy reconfiguration */ + mvpp22_comphy_init(port); + + /* gop reconfiguration */ + mvpp22_gop_init(port); + + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0) { + ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); + ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR) + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; + else + ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + + writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); + } + + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) + mvpp2_xlg_max_rx_size_set(port); + else + mvpp2_gmac_max_rx_size_set(port); +} + +/* Set hw internals when starting port */ +static void mvpp2_start_dev(struct mvpp2_port *port) +{ + int i; + + mvpp2_txp_max_tx_size_set(port); + + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); + + /* Enable interrupts on all CPUs */ + mvpp2_interrupts_enable(port); + + if (port->priv->hw_version == MVPP22) + mvpp22_mode_reconfigure(port); + + if (port->phylink) { + phylink_start(port->phylink); + } else { + /* Phylink isn't used as of now for ACPI, so the MAC has to be + * configured manually when the interface is started. This will + * be removed as soon as the phylink ACPI support lands in. 
+ */ + struct phylink_link_state state = { + .interface = port->phy_interface, + .link = 1, + }; + mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); + } + + netif_tx_start_all_queues(port->dev); +} + +/* Set hw internals when stopping port */ +static void mvpp2_stop_dev(struct mvpp2_port *port) +{ + int i; + + /* Disable interrupts on all CPUs */ + mvpp2_interrupts_disable(port); + + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); + + if (port->phylink) + phylink_stop(port->phylink); + phy_power_off(port->comphy); +} + +static int mvpp2_check_ringparam_valid(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + u16 new_rx_pending = ring->rx_pending; + u16 new_tx_pending = ring->tx_pending; + + if (ring->rx_pending == 0 || ring->tx_pending == 0) + return -EINVAL; + + if (ring->rx_pending > MVPP2_MAX_RXD_MAX) + new_rx_pending = MVPP2_MAX_RXD_MAX; + else if (!IS_ALIGNED(ring->rx_pending, 16)) + new_rx_pending = ALIGN(ring->rx_pending, 16); + + if (ring->tx_pending > MVPP2_MAX_TXD_MAX) + new_tx_pending = MVPP2_MAX_TXD_MAX; + else if (!IS_ALIGNED(ring->tx_pending, 32)) + new_tx_pending = ALIGN(ring->tx_pending, 32); + + /* The Tx ring size cannot be smaller than the minimum number of + * descriptors needed for TSO. + */ + if (new_tx_pending < MVPP2_MAX_SKB_DESCS) + new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32); + + if (ring->rx_pending != new_rx_pending) { + netdev_info(dev, "illegal Rx ring size value %d, round to %d\n", + ring->rx_pending, new_rx_pending); + ring->rx_pending = new_rx_pending; + } + + if (ring->tx_pending != new_tx_pending) { + netdev_info(dev, "illegal Tx ring size value %d, round to %d\n", + ring->tx_pending, new_tx_pending); + ring->tx_pending = new_tx_pending; + } + + return 0; +} + +static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr) +{ + u32 mac_addr_l, mac_addr_m, mac_addr_h; + + mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG); + mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE); + mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH); + addr[0] = (mac_addr_h >> 24) & 0xFF; + addr[1] = (mac_addr_h >> 16) & 0xFF; + addr[2] = (mac_addr_h >> 8) & 0xFF; + addr[3] = mac_addr_h & 0xFF; + addr[4] = mac_addr_m & 0xFF; + addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF; +} + +static int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_status_flags(qv->irq, IRQ_NO_BALANCING); + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_affinity_hint(qv->irq, + cpumask_of(qv->sw_thread_id)); + } + + return 0; +err: + /* Unwind only the vectors whose IRQ was actually requested */ + while (--i >= 0) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + irq_set_affinity_hint(qv->irq, NULL); + irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING); + free_irq(qv->irq, qv); + } +} + +static int mvpp2_open(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2 *priv = port->priv; + unsigned char mac_bcast[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + bool valid = false; + int err; + + err = 
mvpp2_prs_mac_da_accept(port, mac_bcast, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); + return err; + } + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true); + if (err) { + netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n"); + return err; + } + err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH); + if (err) { + netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n"); + return err; + } + err = mvpp2_prs_def_flow(port); + if (err) { + netdev_err(dev, "mvpp2_prs_def_flow failed\n"); + return err; + } + + /* Allocate the Rx/Tx queues */ + err = mvpp2_setup_rxqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Rx queues\n"); + return err; + } + + err = mvpp2_setup_txqs(port); + if (err) { + netdev_err(port->dev, "cannot allocate Tx queues\n"); + goto err_cleanup_rxqs; + } + + err = mvpp2_irqs_init(port); + if (err) { + netdev_err(port->dev, "cannot init IRQs\n"); + goto err_cleanup_txqs; + } + + /* Phylink isn't supported yet in ACPI mode */ + if (port->of_node) { + err = phylink_of_phy_connect(port->phylink, port->of_node, 0); + if (err) { + netdev_err(port->dev, "could not attach PHY (%d)\n", + err); + goto err_free_irq; + } + + valid = true; + } + + if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) { + err = request_irq(port->link_irq, mvpp2_link_status_isr, 0, + dev->name, port); + if (err) { + netdev_err(port->dev, "cannot request link IRQ %d\n", + port->link_irq); + goto err_free_irq; + } + + mvpp22_gop_setup_irq(port); + + /* The link is down by default */ + netif_carrier_off(port->dev); + + valid = true; + } else { + port->link_irq = 0; + } + + if (!valid) { + netdev_err(port->dev, + "invalid configuration: no dt or link IRQ\n"); + goto err_free_irq; + } + + /* Unmask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_unmask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); + + mvpp2_start_dev(port); + + if (priv->hw_version == MVPP22) + mvpp22_init_rss(port); + + /* Start hardware statistics gathering */ + queue_delayed_work(priv->stats_queue, &port->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + return 0; + +err_free_irq: + mvpp2_irqs_deinit(port); +err_cleanup_txqs: + mvpp2_cleanup_txqs(port); +err_cleanup_rxqs: + mvpp2_cleanup_rxqs(port); + return err; +} + +static int mvpp2_stop(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port_pcpu *port_pcpu; + int cpu; + + mvpp2_stop_dev(port); + + /* Mask interrupts on all CPUs */ + on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); + + if (port->phylink) + phylink_disconnect_phy(port->phylink); + if (port->link_irq) + free_irq(port->link_irq, port); + + mvpp2_irqs_deinit(port); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } + } + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + cancel_delayed_work_sync(&port->stats_work); + + return 0; +} + +static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port, + struct netdev_hw_addr_list *list) +{ + struct netdev_hw_addr *ha; + int ret; + + netdev_hw_addr_list_for_each(ha, list) { + ret = mvpp2_prs_mac_da_accept(port, ha->addr, true); + if (ret) + return ret; + } + + return 0; +} + +static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable) +{ + if (!enable && (port->dev->features & 
NETIF_F_HW_VLAN_CTAG_FILTER)) + mvpp2_prs_vid_enable_filtering(port); + else + mvpp2_prs_vid_disable_filtering(port); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, enable); + + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, enable); +} + +static void mvpp2_set_rx_mode(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Clear the whole UC and MC list */ + mvpp2_prs_mac_del_all(port); + + if (dev->flags & IFF_PROMISC) { + mvpp2_set_rx_promisc(port, true); + return; + } + + mvpp2_set_rx_promisc(port, false); + + if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->uc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_UNI_CAST, true); + + if (dev->flags & IFF_ALLMULTI) { + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); + return; + } + + if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX || + mvpp2_prs_mac_da_accept_list(port, &dev->mc)) + mvpp2_prs_mac_promisc_set(port->priv, port->id, + MVPP2_PRS_L2_MULTI_CAST, true); +} + +static int mvpp2_set_mac_address(struct net_device *dev, void *p) +{ + const struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = mvpp2_prs_update_mac_da(dev, addr->sa_data); + if (err) { + /* Reconfigure the parser to accept the original MAC address */ + mvpp2_prs_update_mac_da(dev, dev->dev_addr); + netdev_err(dev, "failed to change MAC address\n"); + } + return err; +} + +static int mvpp2_change_mtu(struct net_device *dev, int mtu) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) { + netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu, + ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8)); + mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8); + } + + if (!netif_running(dev)) { + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + return 0; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto log_error; + } + + mvpp2_stop_dev(port); + + err = mvpp2_bm_update_mtu(dev, mtu); + if (!err) { + port->pkt_size = MVPP2_RX_PKT_SIZE(mtu); + goto out_start; + } + + /* Reconfigure BM to the original MTU */ + err = mvpp2_bm_update_mtu(dev, dev->mtu); + if (err) + goto log_error; + +out_start: + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; +log_error: + netdev_err(dev, "failed to change MTU\n"); + return err; +} + +static void +mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mvpp2_port *port = netdev_priv(dev); + unsigned int start; + int cpu; + + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + tx_packets = cpu_stats->tx_packets; + tx_bytes = cpu_stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + } + + stats->rx_errors = dev->stats.rx_errors; + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; +} + +static int mvpp2_ioctl(struct net_device *dev, struct 
ifreq *ifr, int cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_mii_ioctl(port->phylink, ifr, cmd); +} + +static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + int ret; + + ret = mvpp2_prs_vid_entry_add(port, vid); + if (ret) + netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n", + MVPP2_PRS_VLAN_FILT_MAX - 1); + return ret; +} + +static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mvpp2_port *port = netdev_priv(dev); + + mvpp2_prs_vid_entry_remove(port, vid); + return 0; +} + +static int mvpp2_set_features(struct net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct mvpp2_port *port = netdev_priv(dev); + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + mvpp2_prs_vid_enable_filtering(port); + } else { + /* Invalidate all registered VID filters for this + * port + */ + mvpp2_prs_vid_remove_all(port); + + mvpp2_prs_vid_disable_filtering(port); + } + } + + return 0; +} + +/* Ethtool methods */ + +static int mvpp2_ethtool_nway_reset(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(port->phylink); +} + +/* Set interrupt coalescing for ethtool */ +static int mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + int queue; + + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->time_coal = c->rx_coalesce_usecs; + rxq->pkts_coal = c->rx_max_coalesced_frames; + mvpp2_rx_pkts_coal_set(port, rxq); + mvpp2_rx_time_coal_set(port, rxq); + } + + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + + for (queue = 0; queue < port->ntxqs; queue++) { + struct mvpp2_tx_queue *txq = port->txqs[queue]; + + txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); + } + + return 0; +} + +/* Get interrupt coalescing for ethtool */ +static int mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct mvpp2_port *port = netdev_priv(dev); + + c->rx_coalesce_usecs = port->rxqs[0]->time_coal; + c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal; + c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal; + c->tx_coalesce_usecs = port->tx_time_coal; + return 0; +} + +static void mvpp2_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, dev_name(&dev->dev), + sizeof(drvinfo->bus_info)); +} + +static void mvpp2_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + + ring->rx_max_pending = MVPP2_MAX_RXD_MAX; + ring->tx_max_pending = MVPP2_MAX_TXD_MAX; + ring->rx_pending = port->rx_ring_size; + ring->tx_pending = port->tx_ring_size; +} + +static int mvpp2_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct mvpp2_port *port = netdev_priv(dev); + u16 prev_rx_ring_size = port->rx_ring_size; + u16 
prev_tx_ring_size = port->tx_ring_size; + int err; + + err = mvpp2_check_ringparam_valid(dev, ring); + if (err) + return err; + + if (!netif_running(dev)) { + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + return 0; + } + + /* The interface is running, so we have to force a + * reallocation of the queues + */ + mvpp2_stop_dev(port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + + port->rx_ring_size = ring->rx_pending; + port->tx_ring_size = ring->tx_pending; + + err = mvpp2_setup_rxqs(port); + if (err) { + /* Reallocate Rx queues with the original ring size */ + port->rx_ring_size = prev_rx_ring_size; + ring->rx_pending = prev_rx_ring_size; + err = mvpp2_setup_rxqs(port); + if (err) + goto err_out; + } + err = mvpp2_setup_txqs(port); + if (err) { + /* Reallocate Tx queues with the original ring size */ + port->tx_ring_size = prev_tx_ring_size; + ring->tx_pending = prev_tx_ring_size; + err = mvpp2_setup_txqs(port); + if (err) + goto err_clean_rxqs; + } + + mvpp2_start_dev(port); + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + + return 0; + +err_clean_rxqs: + mvpp2_cleanup_rxqs(port); +err_out: + netdev_err(dev, "failed to change ring parameters"); + return err; +} + +static void mvpp2_ethtool_get_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return; + + phylink_ethtool_get_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_set_pause_param(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_set_pauseparam(port->phylink, pause); +} + +static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_get(port->phylink, cmd); +} + +static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (!port->phylink) + return -ENOTSUPP; + + return phylink_ethtool_ksettings_set(port->phylink, cmd); +} + +/* Device ops */ + +static const struct net_device_ops mvpp2_netdev_ops = { + .ndo_open = mvpp2_open, + .ndo_stop = mvpp2_stop, + .ndo_start_xmit = mvpp2_tx, + .ndo_set_rx_mode = mvpp2_set_rx_mode, + .ndo_set_mac_address = mvpp2_set_mac_address, + .ndo_change_mtu = mvpp2_change_mtu, + .ndo_get_stats64 = mvpp2_get_stats64, + .ndo_do_ioctl = mvpp2_ioctl, + .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, + .ndo_set_features = mvpp2_set_features, +}; + +static const struct ethtool_ops mvpp2_eth_tool_ops = { + .nway_reset = mvpp2_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .set_coalesce = mvpp2_ethtool_set_coalesce, + .get_coalesce = mvpp2_ethtool_get_coalesce, + .get_drvinfo = mvpp2_ethtool_get_drvinfo, + .get_ringparam = mvpp2_ethtool_get_ringparam, + .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, + .get_pauseparam = mvpp2_ethtool_get_pause_param, + .set_pauseparam = mvpp2_ethtool_set_pause_param, + .get_link_ksettings = mvpp2_ethtool_get_link_ksettings, + .set_link_ksettings = mvpp2_ethtool_set_link_ksettings, +}; + +/* Used for PPv2.1, or 
PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. + */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v; + int i, ret; + + port->nqvecs = num_possible_cpus(); + if (queue_mode == MVPP2_QDIST_SINGLE_MODE) + port->nqvecs += 1; + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i * MVPP2_DEFAULT_RXQ; + v->nrxqs = MVPP2_DEFAULT_RXQ; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + if (port_node) + v->irq = of_irq_get_byname(port_node, irqname); + else + v->irq = fwnode_irq_get(port->fwnode, i); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + +/* Initialize port HW */ +static int mvpp2_port_init(struct mvpp2_port *port) +{ + struct device *dev = port->dev->dev.parent; + struct mvpp2 *priv = port->priv; + struct mvpp2_txq_pcpu *txq_pcpu; + int queue, cpu, err; + + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > + MVPP2_MAX_PORTS * priv->max_port_rxqs) + return -EINVAL; + + if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || + (port->ntxqs > MVPP2_MAX_TXQ)) + return -EINVAL; + + /* Disable port */ + 
mvpp2_egress_disable(port); + mvpp2_port_disable(port); + + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), + GFP_KERNEL); + if (!port->txqs) + return -ENOMEM; + + /* Associate physical Tx queues to this port and initialize. + * The mapping is predefined. + */ + for (queue = 0; queue < port->ntxqs; queue++) { + int queue_phy_id = mvpp2_txq_phys(port->id, queue); + struct mvpp2_tx_queue *txq; + + txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); + if (!txq) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu); + if (!txq->pcpu) { + err = -ENOMEM; + goto err_free_percpu; + } + + txq->id = queue_phy_id; + txq->log_id = queue; + txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; + for_each_present_cpu(cpu) { + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu->cpu = cpu; + } + + port->txqs[queue] = txq; + } + + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), + GFP_KERNEL); + if (!port->rxqs) { + err = -ENOMEM; + goto err_free_percpu; + } + + /* Allocate and initialize Rx queue for this port */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq; + + /* Map physical Rx queue to port's logical Rx queue */ + rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); + if (!rxq) { + err = -ENOMEM; + goto err_free_percpu; + } + /* Map this Rx queue to a physical queue */ + rxq->id = port->first_rxq + queue; + rxq->port = port->id; + rxq->logic_rxq = queue; + + port->rxqs[queue] = rxq; + } + + mvpp2_rx_irqs_setup(port); + + /* Create Rx descriptor rings */ + for (queue = 0; queue < port->nrxqs; queue++) { + struct mvpp2_rx_queue *rxq = port->rxqs[queue]; + + rxq->size = port->rx_ring_size; + rxq->pkts_coal = MVPP2_RX_COAL_PKTS; + rxq->time_coal = MVPP2_RX_COAL_USEC; + } + + mvpp2_ingress_disable(port); + + /* Port default configuration */ + mvpp2_defaults_set(port); + + /* Port's classifier configuration */ + mvpp2_cls_oversize_rxq_set(port); + mvpp2_cls_port_config(port); + + /* Provide an initial Rx packet size */ + port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu); + + /* Initialize pools for software forwarding (swf) */ + err = mvpp2_swf_bm_pool_init(port); + if (err) + goto err_free_percpu; + + return 0; + +err_free_percpu: + for (queue = 0; queue < port->ntxqs; queue++) { + if (!port->txqs[queue]) + continue; + free_percpu(port->txqs[queue]->pcpu); + } + return err; +} + +/* Checks if the port DT description has the TX interrupts + * described. On PPv2.1, there are no such interrupts. On PPv2.2, + * they are available, but we need to keep support for old DTs. 
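+ * + * A PPv2.2 port node carrying per-CPU Tx interrupts is expected to + * name them as below, e.g. (sketch of the DT property): + * + *   interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1", + *                     "tx-cpu2", "tx-cpu3";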
+ */ +static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, + struct device_node *port_node) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", + "tx-cpu2", "tx-cpu3" }; + int ret, i; + + if (priv->hw_version == MVPP21) + return false; + + for (i = 0; i < 5; i++) { + ret = of_property_match_string(port_node, "interrupt-names", + irqs[i]); + if (ret < 0) + return false; + } + + return true; +} + +static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, + struct fwnode_handle *fwnode, + char **mac_from) +{ + struct mvpp2_port *port = netdev_priv(dev); + char hw_mac_addr[ETH_ALEN] = {0}; + char fw_mac_addr[ETH_ALEN]; + + if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) { + *mac_from = "firmware node"; + ether_addr_copy(dev->dev_addr, fw_mac_addr); + return; + } + + if (priv->hw_version == MVPP21) { + mvpp21_get_mac_address(port, hw_mac_addr); + if (is_valid_ether_addr(hw_mac_addr)) { + *mac_from = "hardware"; + ether_addr_copy(dev->dev_addr, hw_mac_addr); + return; + } + } + + *mac_from = "random"; + eth_hw_addr_random(dev); +} + +static void mvpp2_phylink_validate(struct net_device *dev, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + switch (state->interface) { + case PHY_INTERFACE_MODE_10GKR: + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); + phylink_set(mask, 10000baseKR_Full); + /* Fall-through */ + default: + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 10000baseT_Full); + /* Fall-through */ + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseX_Full); + } + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void mvpp22_xlg_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + state->speed = SPEED_10000; + state->duplex = 1; + state->an_complete = 1; + + val = readl(port->base + MVPP22_XLG_STATUS); + state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP); + + state->pause = 0; + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_TX; + if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN) + state->pause |= MLO_PAUSE_RX; +} + +static void mvpp2_gmac_link_state(struct mvpp2_port *port, + struct phylink_link_state *state) +{ + u32 val; + + val = readl(port->base + MVPP2_GMAC_STATUS0); + + state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE); + state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP); + state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_1000BASEX: + state->speed = SPEED_1000; + break; + case PHY_INTERFACE_MODE_2500BASEX: + state->speed = SPEED_2500; + break; + default: + if (val & MVPP2_GMAC_STATUS0_GMII_SPEED) + state->speed = SPEED_1000; + else if (val & MVPP2_GMAC_STATUS0_MII_SPEED) + state->speed = SPEED_100; + else + state->speed = SPEED_10; + } + + state->pause = 0; + 
if (val & MVPP2_GMAC_STATUS0_RX_PAUSE) + state->pause |= MLO_PAUSE_RX; + if (val & MVPP2_GMAC_STATUS0_TX_PAUSE) + state->pause |= MLO_PAUSE_TX; +} + +static int mvpp2_phylink_mac_link_state(struct net_device *dev, + struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { + u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); + mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK; + + if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) { + mvpp22_xlg_link_state(port, state); + return 1; + } + } + + mvpp2_gmac_link_state(port, state); + return 1; +} + +static void mvpp2_mac_an_restart(struct net_device *dev) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) + return; + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + /* The RESTART_AN bit is cleared by the h/w after restarting the AN + * process. + */ + val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 ctrl0, ctrl4; + + ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); + ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); + + if (state->pause & MLO_PAUSE_TX) + ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + + ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; + ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK; + + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); + writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); +} + +static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, + const struct phylink_link_state *state) +{ + u32 an, ctrl0, ctrl2, ctrl4; + + an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + + /* Force link down */ + an &= ~MVPP2_GMAC_FORCE_LINK_PASS; + an |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state */ + ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + + an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | + MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | + MVPP2_GMAC_FORCE_LINK_DOWN); + ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; + ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. 
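+ * (both are forced below via the GMII_SPEED and FULL_DUPLEX config bits)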
+ */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + an |= MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; + } else if (!phy_interface_mode_is_rgmii(state->interface)) { + an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + } + + if (state->duplex) + an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + if (phylink_test(state->advertising, Pause)) + an |= MVPP2_GMAC_FC_ADV_EN; + if (phylink_test(state->advertising, Asym_Pause)) + an |= MVPP2_GMAC_FC_ADV_ASM_EN; + + if (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG; + ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + + ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; + + if (state->speed == SPEED_1000) + an |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + an |= MVPP2_GMAC_CONFIG_MII_SPEED; + + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } + + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Check for invalid configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { + netdev_err(dev, "Invalid mode on %s\n", dev->name); + return; + } + + netif_tx_stop_all_queues(port->dev); + if (!port->has_phy) + netif_carrier_off(port->dev); + + /* Make sure the port is disabled when reconfiguring the mode */ + mvpp2_port_disable(port); + + if (port->priv->hw_version == MVPP22 && + port->phy_interface != state->interface) { + port->phy_interface = state->interface; + + /* Reconfigure the serdes lanes */ + phy_power_off(port->comphy); + mvpp22_mode_reconfigure(port); + } + + /* mac (re)configuration */ + if (state->interface == PHY_INTERFACE_MODE_10GKR) + mvpp2_xlg_config(port, mode, state); + else if (phy_interface_mode_is_rgmii(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX) + mvpp2_gmac_config(port, mode, state); + + if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port, state); + + /* If the port already was up, make sure it's still in the same state */ + if (state->link || !port->has_phy) { + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + if (!port->has_phy) + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } +} + +static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, + phy_interface_t interface, struct phy_device *phy) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + 
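/* Without in-band AN, software must release the forced link-down state set in mvpp2_mac_config() */ +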
val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; + if (phy_interface_mode_is_rgmii(interface)) + val |= MVPP2_GMAC_FORCE_LINK_PASS; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + mvpp2_port_enable(port); + + mvpp2_egress_enable(port); + mvpp2_ingress_enable(port); + netif_tx_wake_all_queues(dev); +} + +static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, + phy_interface_t interface) +{ + struct mvpp2_port *port = netdev_priv(dev); + u32 val; + + if (!phylink_autoneg_inband(mode) && + interface != PHY_INTERFACE_MODE_10GKR) { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } + + netif_tx_stop_all_queues(dev); + mvpp2_egress_disable(port); + mvpp2_ingress_disable(port); + + /* When using link interrupts to notify phylink of a MAC state change, + * we do not want the port to be disabled (we want to receive further + * interrupts, to be notified when the port will have a link later). + */ + if (!port->has_phy) + return; + + mvpp2_port_disable(port); +} + +static const struct phylink_mac_ops mvpp2_phylink_ops = { + .validate = mvpp2_phylink_validate, + .mac_link_state = mvpp2_phylink_mac_link_state, + .mac_an_restart = mvpp2_mac_an_restart, + .mac_config = mvpp2_mac_config, + .mac_link_up = mvpp2_mac_link_up, + .mac_link_down = mvpp2_mac_link_down, +}; + +/* Ports initialization */ +static int mvpp2_port_probe(struct platform_device *pdev, + struct fwnode_handle *port_fwnode, + struct mvpp2 *priv) +{ + struct phy *comphy = NULL; + struct mvpp2_port *port; + struct mvpp2_port_pcpu *port_pcpu; + struct device_node *port_node = to_of_node(port_fwnode); + struct net_device *dev; + struct resource *res; + struct phylink *phylink; + char *mac_from = ""; + unsigned int ntxqs, nrxqs; + bool has_tx_irqs; + u32 id; + int features; + int phy_mode; + int err, i, cpu; + + if (port_node) { + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + } else { + has_tx_irqs = true; + queue_mode = MVPP2_QDIST_MULTI_MODE; + } + + if (!has_tx_irqs) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + + ntxqs = MVPP2_MAX_TXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) + nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); + else + nrxqs = MVPP2_DEFAULT_RXQ; + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); + if (!dev) + return -ENOMEM; + + phy_mode = fwnode_get_phy_mode(port_fwnode); + if (phy_mode < 0) { + dev_err(&pdev->dev, "incorrect phy mode\n"); + err = phy_mode; + goto err_free_netdev; + } + + if (port_node) { + comphy = devm_of_phy_get(&pdev->dev, port_node, NULL); + if (IS_ERR(comphy)) { + if (PTR_ERR(comphy) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_free_netdev; + } + comphy = NULL; + } + } + + if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing port-id value\n"); + goto err_free_netdev; + } + + dev->tx_queue_len = MVPP2_MAX_TXD_MAX; + dev->watchdog_timeo = 5 * HZ; + dev->netdev_ops = &mvpp2_netdev_ops; + dev->ethtool_ops = &mvpp2_eth_tool_ops; + + port = netdev_priv(dev); + port->dev = dev; + port->fwnode = port_fwnode; + port->has_phy = !!of_find_property(port_node, "phy", NULL); + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; + + err = mvpp2_queue_vectors_init(port, port_node); + if (err) + goto err_free_netdev; + + if (port_node) + 
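/* DT provides a named "link" interrupt; with ACPI it is only available by index, after the queue vector interrupts */ +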
port->link_irq = of_irq_get_byname(port_node, "link"); + else + port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1); + if (port->link_irq == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_deinit_qvecs; + } + if (port->link_irq <= 0) + /* the link irq is optional */ + port->link_irq = 0; + + if (fwnode_property_read_bool(port_fwnode, "marvell,loopback")) + port->flags |= MVPP2_F_LOOPBACK; + + port->id = id; + if (priv->hw_version == MVPP21) + port->first_rxq = port->id * port->nrxqs; + else + port->first_rxq = port->id * priv->max_port_rxqs; + + port->of_node = port_node; + port->phy_interface = phy_mode; + port->comphy = comphy; + + if (priv->hw_version == MVPP21) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id); + port->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(port->base)) { + err = PTR_ERR(port->base); + goto err_free_irq; + } + + port->stats_base = port->priv->lms_base + + MVPP21_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; + } else { + if (fwnode_property_read_u32(port_fwnode, "gop-port-id", + &port->gop_id)) { + err = -EINVAL; + dev_err(&pdev->dev, "missing gop-port-id value\n"); + goto err_deinit_qvecs; + } + + port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); + port->stats_base = port->priv->iface_base + + MVPP22_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; + } + + /* Alloc per-cpu and ethtool stats */ + port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); + if (!port->stats) { + err = -ENOMEM; + goto err_free_irq; + } + + port->ethtool_stats = devm_kcalloc(&pdev->dev, + ARRAY_SIZE(mvpp2_ethtool_regs), + sizeof(u64), GFP_KERNEL); + if (!port->ethtool_stats) { + err = -ENOMEM; + goto err_free_stats; + } + + mutex_init(&port->gather_stats_lock); + INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); + + mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from); + + port->tx_ring_size = MVPP2_MAX_TXD_DFLT; + port->rx_ring_size = MVPP2_MAX_RXD_DFLT; + SET_NETDEV_DEV(dev, &pdev->dev); + + err = mvpp2_port_init(port); + if (err < 0) { + dev_err(&pdev->dev, "failed to init port %d\n", id); + goto err_free_stats; + } + + mvpp2_port_periodic_xon_disable(port); + + mvpp2_port_reset(port); + + port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); + if (!port->pcpu) { + err = -ENOMEM; + goto err_free_txq_pcpu; + } + + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); + + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; + + tasklet_init(&port_pcpu->tx_done_tasklet, + mvpp2_tx_proc_cb, + (unsigned long)dev); + } + } + + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO; + dev->features = features | NETIF_F_RXCSUM; + dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) { + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + } + + dev->vlan_features |= features; + dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; + dev->priv_flags |= IFF_UNICAST_FLT; + + /* MTU range: 68 - 9704 */ + dev->min_mtu = ETH_MIN_MTU; + /* 9704 == 9728 - 20 and rounding to 8 */ + dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE; + + /* Phylink isn't used w/ ACPI as of now */ + if (port_node) { + phylink = phylink_create(dev, port_fwnode, 
phy_mode, + &mvpp2_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_port_pcpu; + } + port->phylink = phylink; + } else { + port->phylink = NULL; + } + + err = register_netdev(dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to register netdev\n"); + goto err_phylink; + } + netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); + + priv->port_list[priv->port_count++] = port; + + return 0; + +err_phylink: + if (port->phylink) + phylink_destroy(port->phylink); +err_free_port_pcpu: + free_percpu(port->pcpu); +err_free_txq_pcpu: + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); +err_free_stats: + free_percpu(port->stats); +err_free_irq: + if (port->link_irq) + irq_dispose_mapping(port->link_irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); +err_free_netdev: + free_netdev(dev); + return err; +} + +/* Ports removal routine */ +static void mvpp2_port_remove(struct mvpp2_port *port) +{ + int i; + + unregister_netdev(port->dev); + if (port->phylink) + phylink_destroy(port->phylink); + free_percpu(port->pcpu); + free_percpu(port->stats); + for (i = 0; i < port->ntxqs; i++) + free_percpu(port->txqs[i]->pcpu); + mvpp2_queue_vectors_deinit(port); + if (port->link_irq) + irq_dispose_mapping(port->link_irq); + free_netdev(port->dev); +} + +/* Initialize decoding windows */ +static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, + struct mvpp2 *priv) +{ + u32 win_enable; + int i; + + for (i = 0; i < 6; i++) { + mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); + mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); + + if (i < 4) + mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); + } + + win_enable = 0; + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + mvpp2_write(priv, MVPP2_WIN_BASE(i), + (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | + dram->mbus_dram_target_id); + + mvpp2_write(priv, MVPP2_WIN_SIZE(i), + (cs->size - 1) & 0xffff0000); + + win_enable |= (1 << i); + } + + mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); +} + +/* Initialize Rx FIFO's */ +static void mvpp2_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + /* The FIFO size parameters are set depending on the maximum speed a + * given port can handle: + * - Port 0: 10Gbps + * - Port 1: 2.5Gbps + * - Ports 2 and 3: 1Gbps + */ + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); + + for (port = 2; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +/* Initialize Tx 
FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
+ * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
+ * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
+ */
+static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
+{
+ int port, size, thrs;
+
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ if (port == 0) {
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ } else {
+ size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ }
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+ }
+}
+
+static void mvpp2_axi_init(struct mvpp2 *priv)
+{
+ u32 val, rdval, wrval;
+
+ mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
+
+ /* AXI Bridge Configuration */
+
+ rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_ATTR_CACHE_OFFS;
+ wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_ATTR_DOMAIN_OFFS;
+
+ /* BM */
+ mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
+
+ /* Descriptors */
+ mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
+ mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
+
+ /* Buffer Data */
+ mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
+ mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
+
+ val = MVPP22_AXI_CODE_CACHE_NON_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+ mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
+ mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_RD_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
+
+ val = MVPP22_AXI_CODE_CACHE_WR_CACHE
+ << MVPP22_AXI_CODE_CACHE_OFFS;
+ val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
+ << MVPP22_AXI_CODE_DOMAIN_OFFS;
+
+ mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+ const struct mbus_dram_target_info *dram_target_info;
+ int err, i;
+ u32 val;
+
+ /* MBUS windows configuration */
+ dram_target_info = mv_mbus_dram_info();
+ if (dram_target_info)
+ mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+ if (priv->hw_version == MVPP22)
+ mvpp2_axi_init(priv);
+
+ /* Disable HW PHY polling */
+ if (priv->hw_version == MVPP21) {
+ val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+ writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+ } else {
+ val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ val &= ~MVPP22_SMI_POLLING_EN;
+ writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
+ }
+
+ /* Allocate and initialize aggregated TXQs */
+ priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+ sizeof(*priv->aggr_txqs),
+ GFP_KERNEL);
+ if (!priv->aggr_txqs)
+ return -ENOMEM;
+
+ for_each_present_cpu(i) {
+ priv->aggr_txqs[i].id = i;
+ priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+ err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
+ if (err < 0)
+ return err;
+ }
+
+ /* Fifo Init */
+ if (priv->hw_version == MVPP21) {
+ mvpp2_rx_fifo_init(priv);
+ } else {
+ mvpp22_rx_fifo_init(priv);
+ mvpp22_tx_fifo_init(priv);
+ }
+
+ if (priv->hw_version == MVPP21)
+ writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+ priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+ /* Allow cache snoop when transmitting packets */
+ mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+ /* Buffer Manager initialization */
+ err = mvpp2_bm_init(pdev, priv);
+ if (err < 0)
+ return err;
+
+ /* Parser default initialization */
+ err = mvpp2_prs_default_init(pdev, priv);
+ if (err < 0)
+ return err;
+
+ /* Classifier default initialization */
+ mvpp2_cls_init(priv);
+
+ return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+ const struct acpi_device_id *acpi_id;
+ struct fwnode_handle *fwnode = pdev->dev.fwnode;
+ struct fwnode_handle *port_fwnode;
+ struct mvpp2 *priv;
+ struct resource *res;
+ void __iomem *base;
+ int i;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ if (has_acpi_companion(&pdev->dev)) {
+ acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+ &pdev->dev);
+ priv->hw_version = (unsigned long)acpi_id->driver_data;
+ } else {
+ priv->hw_version =
+ (unsigned long)of_device_get_match_data(&pdev->dev);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (priv->hw_version == MVPP21) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->lms_base))
+ return PTR_ERR(priv->lms_base);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (has_acpi_companion(&pdev->dev)) {
+ /* In case the MDIO memory region is declared in
+ * the ACPI, it can already appear as 'in-use'
+ * in the OS. Because it is overlapped by the second
+ * region of the network controller, make
+ * sure it is released before requesting it again.
+ * Care is taken by the mvpp2 driver to avoid
+ * concurrent access to this memory region.
+ */
+ release_resource(res);
+ }
+ priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->iface_base))
+ return PTR_ERR(priv->iface_base);
+ }
+
+ if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ priv->sysctrl_base =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "marvell,system-controller");
+ if (IS_ERR(priv->sysctrl_base))
+ /* The system controller regmap is optional for dt
+ * compatibility reasons. When not provided, the
+ * configuration of the GoP relies on the
+ * firmware/bootloader.
+ */
+ priv->sysctrl_base = NULL;
+ }
+
+ mvpp2_setup_bm_pool();
+
+ for (i = 0; i < MVPP2_MAX_THREADS; i++) {
+ u32 addr_space_sz;
+
+ addr_space_sz = (priv->hw_version == MVPP21 ?
+ MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); + priv->swth_base[i] = base + i * addr_space_sz; + } + + if (priv->hw_version == MVPP21) + priv->max_port_rxqs = 8; + else + priv->max_port_rxqs = 32; + + if (dev_of_node(&pdev->dev)) { + priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk"); + if (IS_ERR(priv->pp_clk)) + return PTR_ERR(priv->pp_clk); + err = clk_prepare_enable(priv->pp_clk); + if (err < 0) + return err; + + priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk"); + if (IS_ERR(priv->gop_clk)) { + err = PTR_ERR(priv->gop_clk); + goto err_pp_clk; + } + err = clk_prepare_enable(priv->gop_clk); + if (err < 0) + goto err_pp_clk; + + if (priv->hw_version == MVPP22) { + priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk"); + if (IS_ERR(priv->mg_clk)) { + err = PTR_ERR(priv->mg_clk); + goto err_gop_clk; + } + + err = clk_prepare_enable(priv->mg_clk); + if (err < 0) + goto err_gop_clk; + + priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk"); + if (IS_ERR(priv->mg_core_clk)) { + priv->mg_core_clk = NULL; + } else { + err = clk_prepare_enable(priv->mg_core_clk); + if (err < 0) + goto err_mg_clk; + } + } + + priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); + if (IS_ERR(priv->axi_clk)) { + err = PTR_ERR(priv->axi_clk); + if (err == -EPROBE_DEFER) + goto err_mg_core_clk; + priv->axi_clk = NULL; + } else { + err = clk_prepare_enable(priv->axi_clk); + if (err < 0) + goto err_mg_core_clk; + } + + /* Get system's tclk rate */ + priv->tclk = clk_get_rate(priv->pp_clk); + } else if (device_property_read_u32(&pdev->dev, "clock-frequency", + &priv->tclk)) { + dev_err(&pdev->dev, "missing clock-frequency value\n"); + return -EINVAL; + } + + if (priv->hw_version == MVPP22) { + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); + if (err) + goto err_axi_clk; + /* Sadly, the BM pools all share the same register to + * store the high 32 bits of their address. So they + * must all have the same high 32 bits, which forces + * us to restrict coherent memory to DMA_BIT_MASK(32). + */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) + goto err_axi_clk; + } + + /* Initialize network controller */ + err = mvpp2_init(pdev, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_axi_clk; + } + + /* Initialize ports */ + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + err = mvpp2_port_probe(pdev, port_fwnode, priv); + if (err < 0) + goto err_port_probe; + } + + if (priv->port_count == 0) { + dev_err(&pdev->dev, "no ports enabled\n"); + err = -ENODEV; + goto err_axi_clk; + } + + /* Statistics must be gathered regularly because some of them (like + * packets counters) are 32-bit registers and could overflow quite + * quickly. For instance, a 10Gb link used at full bandwidth with the + * smallest packets (64B) will overflow a 32-bit counter in less than + * 30 seconds. Then, use a workqueue to fill 64-bit counters. + */ + snprintf(priv->queue_name, sizeof(priv->queue_name), + "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), + priv->port_count > 1 ? 
"+" : ""); + priv->stats_queue = create_singlethread_workqueue(priv->queue_name); + if (!priv->stats_queue) { + err = -ENOMEM; + goto err_port_probe; + } + + platform_set_drvdata(pdev, priv); + return 0; + +err_port_probe: + i = 0; + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) + mvpp2_port_remove(priv->port_list[i]); + i++; + } +err_axi_clk: + clk_disable_unprepare(priv->axi_clk); + +err_mg_core_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_core_clk); +err_mg_clk: + if (priv->hw_version == MVPP22) + clk_disable_unprepare(priv->mg_clk); +err_gop_clk: + clk_disable_unprepare(priv->gop_clk); +err_pp_clk: + clk_disable_unprepare(priv->pp_clk); + return err; +} + +static int mvpp2_remove(struct platform_device *pdev) +{ + struct mvpp2 *priv = platform_get_drvdata(pdev); + struct fwnode_handle *fwnode = pdev->dev.fwnode; + struct fwnode_handle *port_fwnode; + int i = 0; + + flush_workqueue(priv->stats_queue); + destroy_workqueue(priv->stats_queue); + + fwnode_for_each_available_child_node(fwnode, port_fwnode) { + if (priv->port_list[i]) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); + mvpp2_port_remove(priv->port_list[i]); + } + i++; + } + + for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { + struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvpp2_bm_pool_destroy(pdev, priv, bm_pool); + } + + for_each_present_cpu(i) { + struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i]; + + dma_free_coherent(&pdev->dev, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + aggr_txq->descs, + aggr_txq->descs_dma); + } + + if (is_acpi_node(port_fwnode)) + return 0; + + clk_disable_unprepare(priv->axi_clk); + clk_disable_unprepare(priv->mg_core_clk); + clk_disable_unprepare(priv->mg_clk); + clk_disable_unprepare(priv->pp_clk); + clk_disable_unprepare(priv->gop_clk); + + return 0; +} + +static const struct of_device_id mvpp2_match[] = { + { + .compatible = "marvell,armada-375-pp2", + .data = (void *)MVPP21, + }, + { + .compatible = "marvell,armada-7k-pp22", + .data = (void *)MVPP22, + }, + { } +}; +MODULE_DEVICE_TABLE(of, mvpp2_match); + +static const struct acpi_device_id mvpp2_acpi_match[] = { + { "MRVL0110", MVPP22 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match); + +static struct platform_driver mvpp2_driver = { + .probe = mvpp2_probe, + .remove = mvpp2_remove, + .driver = { + .name = MVPP2_DRIVER_NAME, + .of_match_table = mvpp2_match, + .acpi_match_table = ACPI_PTR(mvpp2_acpi_match), + }, +}; + +module_platform_driver(mvpp2_driver); + +MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c new file mode 100644 index 000000000000..6bb69f086794 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -0,0 +1,2467 @@ +/* + * Header Parser helpers for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <uapi/linux/ppp_defs.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "mvpp2.h" +#include "mvpp2_prs.h" + +/* Update parser tcam and sram hw entries */ +static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) +{ + int i; + + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + /* Clear entry invalidation bit */ + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); + + return 0; +} + +/* Initialize tcam entry from hw */ +static int mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) +{ + int i; + + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) + return -EINVAL; + + memset(pe, 0, sizeof(*pe)); + pe->index = tid; + + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + + pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, + MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); + if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) + return MVPP2_PRS_TCAM_ENTRY_INVALID; + + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); + + /* Write sram index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); + + return 0; +} + +/* Invalidate tcam hw entry */ +static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) +{ + /* Write index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), + MVPP2_PRS_TCAM_INV_MASK); +} + +/* Enable shadow table entry and set its lookup ID */ +static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) +{ + priv->prs_shadow[index].valid = true; + priv->prs_shadow[index].lu = lu; +} + +/* Update ri fields in shadow table entry */ +static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, + unsigned int ri, unsigned int ri_mask) +{ + priv->prs_shadow[index].ri_mask = ri_mask; + priv->prs_shadow[index].ri = ri; +} + +/* Update lookup field in tcam sw entry */ +static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; + pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; +} + +/* Update mask for single port in tcam sw entry */ +static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, + unsigned int port, bool add) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + if (add) + pe->tcam.byte[enable_off] &= ~(1 << port); + else + pe->tcam.byte[enable_off] |= 1 << port; +} + +/* Update port map in tcam sw entry */ +static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, + unsigned int ports) +{ + unsigned char port_mask = MVPP2_PRS_PORT_MASK; + int enable_off = 
MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; + pe->tcam.byte[enable_off] &= ~port_mask; + pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; +} + +/* Obtain port map from tcam sw entry */ +static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) +{ + int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); + + return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; +} + +/* Set byte of data and its enable bits in tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char byte, + unsigned char enable) +{ + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; + pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; +} + +/* Get byte of data and its enable bits from tcam sw entry */ +static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, + unsigned int offs, unsigned char *byte, + unsigned char *enable) +{ + *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; + *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; +} + +/* Compare tcam data bytes with a pattern */ +static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, + u16 data) +{ + int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); + u16 tcam_data; + + tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; + if (tcam_data != data) + return false; + return true; +} + +/* Update ai bits in tcam sw entry */ +static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int enable) +{ + int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE; + + for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { + if (!(enable & BIT(i))) + continue; + + if (bits & BIT(i)) + pe->tcam.byte[ai_idx] |= 1 << i; + else + pe->tcam.byte[ai_idx] &= ~(1 << i); + } + + pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable; +} + +/* Get ai bits from tcam sw entry */ +static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe) +{ + return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE]; +} + +/* Set ethertype in tcam sw entry */ +static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, + unsigned short ethertype) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); +} + +/* Set vid in tcam sw entry */ +static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset, + unsigned short vid) +{ + mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf); + mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff); +} + +/* Set bits in sram sw entry */ +static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); +} + +/* Clear bits in sram sw entry */ +static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, + int val) +{ + pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); +} + +/* Update ri bits in sram sw entry */ +static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, + unsigned int bits, unsigned int mask) +{ + unsigned int i; + + for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { + int ri_off = MVPP2_PRS_SRAM_RI_OFFS; + + if (!(mask & BIT(i))) + continue; + + if (bits & BIT(i)) + mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); + else + mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); + + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); + } +} + +/* Obtain ri bits from sram sw 
entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+ return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+ unsigned int bits, unsigned int mask)
+{
+ unsigned int i;
+ int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+ for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+ if (!(mask & BIT(i)))
+ continue;
+
+ if (bits & BIT(i))
+ mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+ else
+ mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+ }
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+ u8 bits;
+ int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+ int ai_en_off = ai_off + 1;
+ int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+ bits = (pe->sram.byte[ai_off] >> ai_shift) |
+ (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+ return bits;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next
+ * lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+ unsigned int lu)
+{
+ int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+ mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+ MVPP2_PRS_SRAM_NEXT_LU_MASK);
+ mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+ unsigned int op)
+{
+ /* Set sign */
+ if (shift < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ shift = 0 - shift;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+ (unsigned char)shift;
+
+ /* Reset and set operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+ unsigned int type, int offset,
+ unsigned int op)
+{
+ /* Set sign */
+ if (offset < 0) {
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ offset = 0 - offset;
+ } else {
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+ }
+
+ /* Set value */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+ MVPP2_PRS_SRAM_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+ MVPP2_PRS_SRAM_UDF_BITS)] &=
+ ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+ MVPP2_PRS_SRAM_UDF_BITS)] |=
+ (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+ /* Set offset type */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+ MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+ /* Set offset operation */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+ mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
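+ /* The op-select field spills over into the next sram byte; patch the remaining bits there as well */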
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+ MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+ ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+ (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+ pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+ MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+ (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+ /* Set base offset as current */
+ mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+ for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+ u8 bits;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ bits = mvpp2_prs_sram_ai_get(&pe);
+
+ /* The sram stores the classification lookup ID in AI bits [5:0] */
+ if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+ unsigned char end)
+{
+ int tid;
+
+ if (start > end)
+ swap(start, end);
+
+ if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+ end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+ for (tid = start; tid <= end; tid++) {
+ if (!priv->prs_shadow[tid].valid)
+ return tid;
+ }
+
+ return -EINVAL;
+}
+
+/* Enable/disable dropping all mac da's */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+ /* Entry exists - update the port only */
+ mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
+ } else {
+ /* Entry doesn't exist - create a new one */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = MVPP2_PE_DROP_ALL;
+
+ /* Non-promiscuous mode for all ports - DROP unknown packets */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to unicast or multicast promiscuous mode */
+void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
+ enum mvpp2_prs_l2_cast l2_cast, bool add)
+{
+ struct mvpp2_prs_entry pe;
+ unsigned char cast_match;
+ unsigned int ri;
+ int tid;
+
+ if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
+ cast_match = MVPP2_PRS_UCAST_VAL;
+ tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_UCAST;
+ } else {
+ cast_match = MVPP2_PRS_MCAST_VAL;
+ tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
+ ri = MVPP2_PRS_RI_L2_MCAST;
+ }
+
+ /* Promiscuous mode - accept unknown unicast or multicast packets */
+ if (priv->prs_shadow[tid].valid) {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+ pe.index = tid;
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+ /* Set result info bits */
+ mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
+
+ /* Match UC or MC addresses */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
+ MVPP2_PRS_CAST_MASK);
+
+ /* Shift to ethertype */
+ mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+ bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update the port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create a new one */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe, 0,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+
+ /* Set ai bits for next iteration */
+ if (extend)
+ mvpp2_prs_sram_ai_update(&pe, 1,
+ MVPP2_PRS_SRAM_AI_MASK);
+ else
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Set result info bits to 'single vlan' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ /* If the packet is tagged, continue with VID filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+ } else {
+ /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
+ mvpp2_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+ bool add, bool tagged, bool extend)
+{
+ struct mvpp2_prs_entry pe;
+ int tid, shift, port_mask;
+
+ if (extend) {
+ tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+ MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+ port_mask = 0;
+ shift = 8;
+ } else {
+ tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+ MVPP2_PE_ETYPE_DSA_UNTAGGED;
+ port_mask = MVPP2_PRS_PORT_MASK;
+ shift = 4;
+ }
+
+ if (priv->prs_shadow[tid].valid) {
+ /* Entry exists - update the port only */
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ } else {
+ /* Entry doesn't exist - create a new one */
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+ pe.index = tid;
+
+ /* Set ethertype */
+ mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+ mvpp2_prs_match_etype(&pe, 2, 0);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+ MVPP2_PRS_RI_DSA_MASK);
+ /* Shift ethertype + 2 byte reserved + tag */
+ mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+ if (tagged) {
+ /* Set tagged bit in DSA tag */
+ mvpp2_prs_tcam_data_byte_set(&pe,
+ MVPP2_ETH_TYPE_LEN + 2 + 3,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+ /* If the packet is tagged, continue with the vlan checks */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ } else {
+ /* Set result info bits to 'no vlans' */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ }
+ /* Mask/unmask all ports, depending on dsa type */
+ mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+ }
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port, add);
+
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_bits, ai_bits;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
+ if (!match)
+ continue;
+
+ /* Get vlan type */
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+ /* Get current ai value from tcam */
+ ai_bits = mvpp2_prs_tcam_ai_get(&pe);
+ /* Clear double vlan bit */
+ ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+ if (ai != ai_bits)
+ continue;
+
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+ unsigned int port_map)
+{
+ struct mvpp2_prs_entry pe;
+ int tid_aux, tid;
+ int ret = 0;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+ MVPP2_PE_FIRST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Get last double vlan tid */
+ for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+ tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+ MVPP2_PRS_RI_VLAN_DOUBLE)
+ break;
+ }
+
+ if (tid <= tid_aux)
+ return -EINVAL;
+
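+ /* The new single/triple vlan entry must sit at a higher index than any existing double vlan entry */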
+ memset(&pe, 0, sizeof(pe));
+ pe.index = tid;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+ mvpp2_prs_match_etype(&pe, 0, tpid);
+
+ /* VLAN tag detected, proceed with VID filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ } else {
+ ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+ MVPP2_PRS_RI_VLAN_MASK);
+ }
+ mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+ } else {
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+ }
+ /* Update ports' mask */
+ mvpp2_prs_tcam_port_map_set(&pe, port_map);
+
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return ret;
+}
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+ int i;
+
+ for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+ if (!priv->prs_double_vlans[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2)
+{
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VLAN */
+ for (tid = MVPP2_PE_FIRST_FREE_TID;
+ tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+ unsigned int ri_mask;
+ bool match;
+
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid);
+
+ match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
+ mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
+
+ if (!match)
+ continue;
+
+ ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+ return tid;
+ }
+
+ return -ENOENT;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+ unsigned short tpid2,
+ unsigned int port_map)
+{
+ int tid_aux, tid, ai, ret = 0;
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+ if (tid < 0) {
+ /* Create new tcam entry */
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
+
+ /* Set ai value for new double vlan entry */
+ ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+ if (ai < 0)
+ return ai;
+
+ /* Get first single/triple vlan tid */
+ for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+ tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+ unsigned int ri_bits;
+
+ if (!priv->prs_shadow[tid_aux].valid ||
+ priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+ continue;
+
+ mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
+ ri_bits = mvpp2_prs_sram_ri_get(&pe);
+ ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+ if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+ ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+ break;
+ }
+
+ if (tid >= tid_aux)
+ return -ERANGE;
+
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ pe.index = tid;
+
+ priv->prs_double_vlans[ai] = true;
+
+ mvpp2_prs_match_etype(&pe, 0, tpid1);
+ mvpp2_prs_match_etype(&pe, 4, tpid2);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ /* Shift 4 bytes - skip outer vlan tag */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
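+ /* Flag the frame as double tagged and record the allocated ai bits for the inner vlan lookup */
+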
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Update ports' mask */ + mvpp2_prs_tcam_port_map_set(&pe, port_map); + mvpp2_prs_hw_write(priv, &pe); + + return ret; +} + +/* IPv4 header parsing for fragmentation and L4 offset */ +static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_IGMP)) + return -EINVAL; + + /* Not fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK_L); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, + MVPP2_PRS_TCAM_PROTO_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Fragmented packet */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + + mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, + ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); + mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv4 L3 multicast or broadcast */ +static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int mask, tid; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = tid; + + switch (l3_cast) { + case MVPP2_PRS_L3_MULTI_CAST: + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC, + MVPP2_PRS_IPV4_MC_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + case MVPP2_PRS_L3_BROAD_CAST: + mask = MVPP2_PRS_IPV4_BC_MASK; + mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask); + mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, 
mask); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + break; + default: + return -EINVAL; + } + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for protocols over IPv6 */ +static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto, + unsigned int ri, unsigned int ri_mask) +{ + struct mvpp2_prs_entry pe; + int tid; + + if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) && + (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 6, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Write HW */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* IPv6 L3 multicast entry */ +static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast) +{ + struct mvpp2_prs_entry pe; + int tid; + + if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) + return -EINVAL; + + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPv6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC, + MVPP2_PRS_IPV6_MC_MASK); + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser per-port initialization */ +static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, + int lu_max, int offset) +{ + u32 val; + + /* Set lookup ID */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); + val &= ~MVPP2_PRS_PORT_LU_MASK(port); + val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); + mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); + + /* Set maximum number of loops for packet received from port */ + val = 
mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); + val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); + val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); + mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); + + /* Set initial offset for packet header extraction for the first + * searching loop + */ + val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); + val &= ~MVPP2_PRS_INIT_OFF_MASK(port); + val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); + mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); +} + +/* Default flow entries initialization for all ports */ +static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Set flow ID */ + mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_hw_write(priv, &pe); + } +} + +/* Set default entry for Marvell Header field */ +static void mvpp2_prs_mh_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + pe.index = MVPP2_PE_MH_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); + mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Set default entries (place holder) for promiscuous, non-promiscuous and + * multicast MAC addresses + */ +static void mvpp2_prs_mac_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Non-promiscuous mode for all ports - DROP unknown packets */ + pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + /* Create dummy entries for drop all and promiscuous modes */ + mvpp2_prs_mac_drop_all_set(priv, 0, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); +} + +/* Set default entries for various types of dsa packets */ +static void mvpp2_prs_dsa_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + /* Non-tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_EDSA); + + /* Tagged EDSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* Non-tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED, + MVPP2_PRS_DSA); + + /* Tagged DSA entry - place holder */ + mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Non-tagged EDSA ethertype entry - place holder */ + 
mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + + /* Tagged EDSA ethertype entry - place holder */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + + /* Non-tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + + /* Tagged DSA ethertype entry */ + mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + + /* Set default entry, in case DSA or EDSA tag not found */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA); + pe.index = MVPP2_PE_DSA_DEFAULT; + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN); + + /* Shift 0 bytes */ + mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + + /* Clear all sram ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + mvpp2_prs_hw_write(priv, &pe); +} + +/* Initialize parser entries for VID filtering */ +static void mvpp2_prs_vid_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 4 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vid entry for extended DSA */ + memset(&pe, 0, sizeof(pe)); + + /* Set default vid entry */ + pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT; + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT, + MVPP2_PRS_EDSA_VID_AI_BIT); + + /* Skip VLAN header - Set offset to 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Match basic ethertypes */ +static int mvpp2_prs_etype_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Ethertype: PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES); + + mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, + MVPP2_PRS_RI_PPPOE_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: ARP */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP); + + /* Generate flow in the next iteration */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: LBTD */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); + + /* Generate flow in the next iteration */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IP); + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); 
+ mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv4 with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Clear tcam data before updating */ + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; + pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD, + MVPP2_PRS_IPV4_HEAD_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Ethertype: IPv6 without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6); + + /* Skip DIP of IPV6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + + MVPP2_MAX_L3_ADDR_SIZE, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = false; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethertype */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); + pe.index = MVPP2_PE_ETH_TYPE_UN; + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Generate flow in the next iteration */ + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; + priv->prs_shadow[pe.index].finish = true; + mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Configure vlan entries and detect up to 2 successive VLAN tags. 
+ * Possible options: + * 0x8100, 0x88A8 + * 0x8100, 0x8100 + * 0x8100 + * 0x88A8 + */ +static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX, + sizeof(bool), + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + /* Double VLAN: 0x8100, 0x88A8 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Double VLAN: 0x8100, 0x8100 */ + err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x88a8 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Single VLAN: 0x8100 */ + err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, + MVPP2_PRS_PORT_MASK); + if (err) + return err; + + /* Set default double vlan entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_DBL; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Clear ai for next iterations */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE, + MVPP2_PRS_RI_VLAN_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT, + MVPP2_PRS_DBL_VLAN_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + /* Set default vlan none entry */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN); + pe.index = MVPP2_PE_VLAN_NONE; + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE, + MVPP2_PRS_RI_VLAN_MASK); + + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Set entries for PPPoE ethertype */ +static int mvpp2_prs_pppoe_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* IPv4 over PPPoE with options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IP); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IP header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv4 over PPPoE without options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri 
before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IPv6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static int mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* 
Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static int mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + int tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + memset(&pe, 0, sizeof(pe)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relative to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(struct ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 
MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Find tcam entry with matched pair <vid,port> */ +static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid, + u16 mask) +{ + unsigned char byte[2], enable[2]; + struct mvpp2_prs_entry pe; + u16 rvid, rmask; + int tid; + + /* Go through all the entries with MVPP2_PRS_LU_VID */ + for (tid = MVPP2_PE_VID_FILT_RANGE_START; + tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) { + if (!priv->prs_shadow[tid].valid || + priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); + mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); + + rvid = ((byte[0] & 0xf) << 8) + byte[1]; + rmask = ((enable[0] & 0xf) << 8) + enable[1]; + + if (rvid != vid || rmask != mask) + continue; + + return tid; + } + + return -ENOENT; +} + +/* Write parser entry for VID filtering */ +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) +{ + unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START + + port->id * MVPP2_PRS_VLAN_FILT_MAX; + unsigned int mask = 0xfff, reg_val, shift; + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <vid,port> already exists */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask); + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + /* No such entry */ + if (tid < 0) { + + /* Go through all entries from first to last in vlan range */ + tid = mvpp2_prs_tcam_first_free(priv, vid_start, + vid_start + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY); + + /* There isn't room for a new VID filter */ + if (tid < 0) + return tid; + + 
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + /* Enable the current port */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Set match on VID */ + mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Remove parser entry for VID filtering */ +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) +{ + struct mvpp2 *priv = port->priv; + int tid; + + /* Scan TCAM and see if entry with this <vid,port> already exists */ + tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff); + + /* No such entry */ + if (tid < 0) + return; + + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; +} + +/* Remove all existing VID filters on this port */ +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int tid; + + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { + if (priv->prs_shadow[tid].valid) + mvpp2_prs_vid_entry_remove(port, tid); + } +} + +/* Remove VID filtering entry for this port */ +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + + /* Invalidate the guard entry */ + mvpp2_prs_hw_inv(priv, tid); + + priv->prs_shadow[tid].valid = false; +} + +/* Add guard entry that drops packets when no VID is matched on this port */ +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) +{ + unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); + struct mvpp2 *priv = port->priv; + unsigned int reg_val, shift; + struct mvpp2_prs_entry pe; + + if (priv->prs_shadow[tid].valid) + return; + + memset(&pe, 0, sizeof(pe)); + + pe.index = tid; + + reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); + if (reg_val & MVPP2_DSA_EXTENDED) + shift = MVPP2_VLAN_TAG_EDSA_LEN; + else + shift = MVPP2_VLAN_TAG_LEN; + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, true); + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2); + + /* Skip VLAN header - Set offset to 4 or 8 bytes */ + mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Drop VLAN packets that don't belong to any VIDs on this port */ + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_DROP_MASK); + + /* Clear all ai bits for next iteration */ + mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK); + + /* Update shadow table */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); + mvpp2_prs_hw_write(priv, &pe); +} + +/* Parser default initialization */ +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) +{ + int err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram entries */ + for (index = 
0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + mvpp2_prs_vid_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(pdev, priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const u8 *da, unsigned char *mask) +{ + unsigned char tcam_byte, tcam_mask; + int index; + + for (index = 0; index < ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return false; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return false; + } + + return true; +} + +/* Find tcam entry with matched pair <MAC DA, port> */ +static int +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, + unsigned char *mask, int udf_type) +{ + struct mvpp2_prs_entry pe; + int tid; + + /* Go through all the entries with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned int entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); + + if (mvpp2_prs_mac_range_equals(&pe, da, mask) && + entry_pmap == pmap) + return tid; + } + + return -ENOENT; +} + +/* Update parser's mac da entry */ +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct mvpp2 *priv = port->priv; + unsigned int pmap, len, ri; + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + /* Scan TCAM and see if entry with this <MAC DA, port> already exists */ + tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No such entry */ + if (tid < 0) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Go through all the entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, + MVPP2_PE_MAC_RANGE_START, + MVPP2_PE_MAC_RANGE_END); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, 0); + } else { + mvpp2_prs_init_from_hw(priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); + + /* Update port mask */ + mvpp2_prs_tcam_port_set(&pe, port->id, 
add); + + /* Invalidate the entry if no ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(&pe); + if (pmap == 0) { + if (add) + return -EINVAL; + + mvpp2_prs_hw_inv(priv, pe.index); + priv->prs_shadow[pe.index].valid = false; + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff); + + /* Set result info bits */ + if (is_broadcast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_BCAST; + } else if (is_multicast_ether_addr(da)) { + ri = MVPP2_PRS_RI_L2_MCAST; + } else { + ri = MVPP2_PRS_RI_L2_UCAST; + + if (ether_addr_equal(da, port->dev->dev_addr)) + ri |= MVPP2_PRS_RI_MAC_ME_MASK; + } + + mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) +{ + struct mvpp2_port *port = netdev_priv(dev); + int err; + + /* Remove old parser entry */ + err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false); + if (err) + return err; + + /* Add new parser entry */ + err = mvpp2_prs_mac_da_accept(port, da, true); + if (err) + return err; + + /* Set addr in the device */ + ether_addr_copy(dev->dev_addr, da); + + return 0; +} + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + struct mvpp2_prs_entry pe; + unsigned long pmap; + int index, tid; + + for (tid = MVPP2_PE_MAC_RANGE_START; + tid <= MVPP2_PE_MAC_RANGE_END; tid++) { + unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + mvpp2_prs_init_from_hw(priv, &pe, tid); + + pmap = mvpp2_prs_tcam_port_map_get(&pe); + + /* We only want entries active on this port */ + if (!test_bit(port->id, &pmap)) + continue; + + /* Read mac addr from entry */ + for (index = 0; index < ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + /* Special cases: Don't remove broadcast and port's own + * address + */ + if (is_broadcast_ether_addr(da) || + ether_addr_equal(da, port->dev->dev_addr)) + continue; + + /* Remove entry from TCAM */ + mvpp2_prs_mac_da_accept(port, da, false); + } +} + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, true, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + /* Remove port from EDSA entries */ + 
mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port from EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, false, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return -EINVAL; + } + + return 0; +} + +/* Set prs flow for the port */ +int mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry pe; + int tid; + + memset(&pe, 0, sizeof(pe)); + + tid = mvpp2_prs_flow_find(port->priv, port->id); + + /* No such entry exists */ + if (tid < 0) { + /* Go through all the entries from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + /* Set flow ID */ + mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); + } else { + mvpp2_prs_init_from_hw(port->priv, &pe, tid); + } + + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, &pe); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h new file mode 100644 index 000000000000..22fbbc4c8b28 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h @@ -0,0 +1,314 @@ +/* + * Header Parser definitions for Marvell PPv2 Network Controller + * + * Copyright (C) 2014 Marvell + * + * Marcin Wojtas <mw@semihalf.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#include <linux/kernel.h> +#include <linux/netdevice.h> + +#include "mvpp2.h" + +#ifndef _MVPP2_PRS_H_ +#define _MVPP2_PRS_H_ + +/* Parser constants */ +#define MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 +#define MVPP2_PRS_CAST_MASK BIT(0) +#define MVPP2_PRS_MCAST_VAL BIT(0) +#define MVPP2_PRS_UCAST_VAL 0x0 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 
+ */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf +#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ + (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) +#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ + (((offs) * 2) - ((offs) % 2) + 2) +#define MVPP2_PRS_TCAM_AI_BYTE 16 +#define MVPP2_PRS_TCAM_PORT_BYTE 17 +#define MVPP2_PRS_TCAM_LU_BYTE 20 +#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) +#define MVPP2_PRS_TCAM_INV_WORD 5 + +#define MVPP2_PRS_VID_TCAM_BYTE 2 + +/* TCAM range for unicast and multicast filtering. We have 25 entries per port, + * with 4 dedicated to UC filtering and the rest to multicast filtering. + * Additionally we reserve one entry for the broadcast address, and one for + * each port's own address. + */ +#define MVPP2_PRS_MAC_UC_MC_FILT_MAX 25 +#define MVPP2_PRS_MAC_RANGE_SIZE 80 + +/* Number of entries per port dedicated to UC and MC filtering */ +#define MVPP2_PRS_MAC_UC_FILT_MAX 4 +#define MVPP2_PRS_MAC_MC_FILT_MAX (MVPP2_PRS_MAC_UC_MC_FILT_MAX - \ + MVPP2_PRS_MAC_UC_FILT_MAX) + +/* There is a TCAM range reserved for VLAN filtering entries, range size is 33 + * 10 VLAN ID filter entries per port + * 1 default VLAN filter entry per port + * It is assumed that there are 3 ports for filtering, not including the loopback port + */ +#define MVPP2_PRS_VLAN_FILT_MAX 11 +#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33 + +#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2) +#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1) + +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 + +/* MAC filtering range */ +#define MVPP2_PE_MAC_RANGE_END (MVPP2_PE_VID_FILT_RANGE_START - 1) +#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \ + MVPP2_PRS_MAC_RANGE_SIZE + 1) +/* VLAN filtering range */ +#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \ + MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1) +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 22) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 21) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +/* reserved */ +#define MVPP2_PE_MAC_MC_PROMISCUOUS 
(MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \ + ((port) * MVPP2_PRS_VLAN_FILT_MAX)) +#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_MAX_ENTRY) +/* Index of default vid filter for given port */ +#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \ + + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY) + +/* Sram structure + * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0). + */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_NONE 0x0 +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_UCAST 0x0 +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_UN 0x0 +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_UCAST 0x0 +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define 
MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) +#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) +#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED true +#define MVPP2_PRS_UNTAGGED false +#define MVPP2_PRS_EDSA true +#define MVPP2_PRS_DSA false + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_VID, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +union mvpp2_prs_tcam_entry { + u32 word[MVPP2_PRS_TCAM_WORDS]; + u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; +}; + +union mvpp2_prs_sram_entry { + u32 word[MVPP2_PRS_SRAM_WORDS]; + u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; +}; + +struct mvpp2_prs_entry { + u32 index; + union mvpp2_prs_tcam_entry tcam; + union mvpp2_prs_sram_entry sram; +}; + +struct mvpp2_prs_shadow { + bool valid; + bool finish; + + /* Lookup ID */ + int lu; + + /* User defined offset */ + int udf; + + /* Result info */ + u32 ri; + u32 ri_mask; +}; + +int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv); + +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add); + +int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type); + +int mvpp2_prs_def_flow(struct mvpp2_port *port); + +void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port); + +void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port); + +int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid); + +void mvpp2_prs_vid_remove_all(struct mvpp2_port *port); + +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add); + +void mvpp2_prs_mac_del_all(struct mvpp2_port *port); + +int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da); + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e0b72bf428f7..d8ebf0a05e0c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2503,7 +2503,6 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; - const struct of_device_id *match; struct mtk_eth *eth; int err; int i; @@ -2512,8 +2511,7 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; - match = of_match_device(of_mtk_match, &pdev->dev); - eth->soc = (struct mtk_soc_data *)match->data; + eth->soc = of_device_get_match_data(&pdev->dev); eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 5c613c6663da..9f54ccbddea7 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -593,30 +593,25 @@ static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb, } #if IS_ENABLED(CONFIG_IPV6) -/* In IPv6 packets, besides subtracting the pseudo header checksum, - * we also compute/add the IP header checksum which - * is not added by the HW. +/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header: + * 4 first bytes : priority, version, flow_lbl + * and 2 additional bytes : nexthdr, hop_limit. */ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, struct ipv6hdr *ipv6h) { __u8 nexthdr = ipv6h->nexthdr; - __wsum csum_pseudo_hdr = 0; + __wsum temp; if (unlikely(nexthdr == IPPROTO_FRAGMENT || nexthdr == IPPROTO_HOPOPTS || nexthdr == IPPROTO_SCTP)) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(nexthdr)); - csum_pseudo_hdr = csum_partial(&ipv6h->saddr, - sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, (__force __wsum)ipv6h->payload_len); - csum_pseudo_hdr = csum_add(csum_pseudo_hdr, - (__force __wsum)htons(nexthdr)); - - skb->csum = csum_sub(hw_checksum, csum_pseudo_hdr); - skb->csum = csum_add(skb->csum, csum_partial(ipv6h, sizeof(struct ipv6hdr), 0)); + /* priority, version, flow_lbl */ + temp = csum_add(hw_checksum, *(__wsum *)ipv6h); + /* nexthdr and hop_limit */ + skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr); return 0; } #endif @@ -775,8 +770,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud act = bpf_prog_run_xdp(xdp_prog, &xdp); + length = xdp.data_end - xdp.data; if (xdp.data != orig_data) { - length = xdp.data_end - xdp.data; frags[0].page_offset = xdp.data - xdp.data_hard_start; va = xdp.data; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 6b6853773848..0227786308af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, u16 rings_p_up = priv->num_tx_rings_p_up; if (netdev_get_num_tc(dev)) - return skb_tx_hash(dev, skb); + return fallback(dev, skb); return fallback(dev, skb) % rings_p_up; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index de6b3d416148..46dcbfbe4c5e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -165,6 +165,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [36] = "QinQ VST mode support", [37] = "sl to vl mapping table change event support", [38] = "user MAC support", + [39] = "Report driver version to FW support", }; int i; @@ -1038,6 +1039,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; if (field32 & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + if (field32 & (1 << 8)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW; MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); if (field32 & (1 << 17)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; @@ -1860,6 +1863,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 
+#define INIT_HCA_DRIVER_VERSION_OFFSET 0x140 +#define INIT_HCA_DRIVER_VERSION_SZ 0x40 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) @@ -1950,6 +1955,13 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) { + u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4); + + strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1); + mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst); + } + /* QPC/EEC/CQC/EQC/RDMARC attributes */ MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 685337d58276..5342bd8a3d0b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -43,12 +43,13 @@ #include "fw.h" /* - * We allocate in page size (default 4KB on many archs) chunks to avoid high - * order memory allocations in fragmented/high usage memory situation. + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. Note that the chunks are not necessarily in contiguous + * physical memory. */ enum { - MLX4_ICM_ALLOC_SIZE = PAGE_SIZE, - MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE, + MLX4_ICM_ALLOC_SIZE = 1 << 18, + MLX4_TABLE_CHUNK_SIZE = 1 << 18, }; static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) @@ -135,6 +136,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, struct mlx4_icm *icm; struct mlx4_icm_chunk *chunk = NULL; int cur_order; + gfp_t mask; int ret; /* We use sg_set_buf for coherent allocs, which assumes low memory */ @@ -178,13 +180,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, while (1 << cur_order > npages) --cur_order; + mask = gfp_mask; + if (cur_order) + mask &= ~__GFP_DIRECT_RECLAIM; + if (coherent) ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev, &chunk->mem[chunk->npages], - cur_order, gfp_mask); + cur_order, mask); else ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages], - cur_order, gfp_mask, + cur_order, mask, dev->numa_node); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 60172a38c4a4..0a30d81aab3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); static int msi_x = 1; module_param(msi_x, int, 0444); -MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); +MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x"); #else /* CONFIG_PCI_MSI */ @@ -2815,6 +2815,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) dev->caps.num_eqs - dev->caps.reserved_eqs, MAX_MSIX); + if (msi_x > 1) + nreq = min_t(int, nreq, msi_x); + entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); if (!entries) goto no_msi; @@ -4127,17 +4130,68 @@ static const struct pci_error_handlers mlx4_err_handler = { .resume = mlx4_pci_resume, }; +static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + + 
mlx4_err(dev, "suspend was called\n"); + mutex_lock(&persist->interface_state_mutex); + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + mlx4_unload_one(pdev); + mutex_unlock(&persist->interface_state_mutex); + + return 0; +} + +static int mlx4_resume(struct pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + int ret = 0; + + mlx4_err(dev, "resume was called\n"); + total_vfs = dev->persist->num_vfs; + memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); + + mutex_lock(&persist->interface_state_mutex); + if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { + ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, + nvfs, priv, 1); + if (!ret) { + ret = restore_current_port_types(dev, + dev->persist->curr_port_type, + dev->persist->curr_port_poss_type); + if (ret) + mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret); + } + } + mutex_unlock(&persist->interface_state_mutex); + + return ret; +} + static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, .shutdown = mlx4_shutdown, .remove = mlx4_remove_one, + .suspend = mlx4_suspend, + .resume = mlx4_resume, .err_handler = &mlx4_err_handler, }; static int __init mlx4_verify_params(void) { + if (msi_x < 0) { + pr_warn("mlx4_core: bad msi_x: %d\n", msi_x); + return -1; + } + if ((log_num_mac < 0) || (log_num_mac > 7)) { pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index c68da1986e51..cb9e923e8399 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -55,8 +55,8 @@ #include "fw_qos.h" #define DRV_NAME "mlx4_core" -#define PFX DRV_NAME ": " #define DRV_VERSION "4.0-0" +#define DRV_NAME_FOR_FW "Linux," DRV_NAME "," DRV_VERSION #define MLX4_FS_UDP_UC_EN (1 << 1) #define MLX4_FS_TCP_UC_EN (1 << 2) diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 3ef3406ff4cb..10fcc22f4590 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -614,9 +614,9 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int index_at_dup_port = -1; for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) { - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))) index_at_port = i; - if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))) + if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))) index_at_dup_port = i; } /* check that same vlan is not in the tables at different indices */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index c032319f1cb9..2545296a0c08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -30,6 +30,7 @@ config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE depends on IPV6=y || IPV6=n || MLX5_CORE=m + select PAGE_POOL default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. 
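The msi_x module parameter reworked above is now tri-state: 0 disables MSI-X, 1 enables it, and any larger value additionally caps how many vectors mlx4_enable_msi_x() requests. A small sketch of that capping logic, using hypothetical capability numbers rather than real device values:

#include <stdio.h>

static int msi_x = 8;	/* example module parameter value */

/* Mirrors the nreq clamping in mlx4_enable_msi_x(): one vector per
 * usable EQ, limited by MAX_MSIX, then by the user's msi_x cap. */
static int compute_msix_vectors(int num_eqs, int reserved_eqs, int max_msix)
{
	int nreq = num_eqs - reserved_eqs;

	if (nreq > max_msix)
		nreq = max_msix;
	if (msi_x > 1 && nreq > msi_x)
		nreq = msi_x;
	return nreq;
}

int main(void)
{
	printf("vectors requested: %d\n", compute_msix_vectors(128, 4, 64));
	return 0;
}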
@@ -85,3 +86,15 @@ config MLX5_EN_IPSEC Build support for IPsec cryptography-offload acceleration in the NIC. Note: Support for hardware with this capability needs to be selected for this option to become available. + +config MLX5_EN_TLS + bool "TLS cryptography-offload acceleration" + depends on MLX5_CORE_EN + depends on TLS_DEVICE + depends on TLS=y || MLX5_CORE=m + depends on MLX5_ACCEL + default n + ---help--- + Build support for TLS cryptography-offload acceleration in the NIC. + Note: Support for hardware with this capability needs to be selected + for this option to become available. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index c805769d92a9..9efbf193ad5a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -8,24 +8,26 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ diag/fs_tracepoint.o -mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o +mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ - fpga/ipsec.o + fpga/ipsec.o fpga/tls.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ - en_arfs.o en_fs_ethtool.o en_selftest.o + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o -mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o +mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o +mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o + CFLAGS_tracepoint.o := -I$(src) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c new file mode 100644 index 000000000000..77ac19f38cbe --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/mlx5/device.h> + +#include "accel/tls.h" +#include "mlx5_core.h" +#include "fpga/tls.h" + +int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) +{ + return mlx5_fpga_tls_add_tx_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, p_swid); +} + +void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) +{ + mlx5_fpga_tls_del_tx_flow(mdev, swid, GFP_KERNEL); +} + +bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_is_tls_device(mdev); +} + +u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_tls_device_caps(mdev); +} + +int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) +{ + return mlx5_fpga_tls_init(mdev); +} + +void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) +{ + mlx5_fpga_tls_cleanup(mdev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h new file mode 100644 index 000000000000..6f9c9f446ecc --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
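The accel/tls.h header that follows advertises device TLS offload capabilities as a bitmask. A user-space sketch of how a consumer might test those bits, with the enum values mirrored from the header and a made-up caps word:

#include <stdint.h>
#include <stdio.h>

/* Capability bits as defined in accel/tls.h. */
enum {
	MLX5_ACCEL_TLS_TX	= 1u << 0,
	MLX5_ACCEL_TLS_RX	= 1u << 1,
	MLX5_ACCEL_TLS_V12	= 1u << 2,
	MLX5_ACCEL_TLS_IPV6	= 1u << 5,
	MLX5_ACCEL_TLS_AES_GCM128 = 1u << 30,
};

int main(void)
{
	/* Example value a device might return from
	 * mlx5_accel_tls_device_caps(); purely illustrative. */
	uint32_t caps = MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 |
			MLX5_ACCEL_TLS_AES_GCM128;

	printf("TX offload: %s\n", (caps & MLX5_ACCEL_TLS_TX) ? "yes" : "no");
	/* mlx5e_tls_set_flow() rejects IPv6 flows when this bit is clear */
	printf("IPv6 flows: %s\n", (caps & MLX5_ACCEL_TLS_IPV6) ? "yes" : "no");
	printf("AES-GCM-128: %s\n", (caps & MLX5_ACCEL_TLS_AES_GCM128) ? "yes" : "no");
	return 0;
}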
+ * + */ + +#ifndef __MLX5_ACCEL_TLS_H__ +#define __MLX5_ACCEL_TLS_H__ + +#include <linux/mlx5/driver.h> +#include <linux/tls.h> + +#ifdef CONFIG_MLX5_ACCEL + +enum { + MLX5_ACCEL_TLS_TX = BIT(0), + MLX5_ACCEL_TLS_RX = BIT(1), + MLX5_ACCEL_TLS_V12 = BIT(2), + MLX5_ACCEL_TLS_V13 = BIT(3), + MLX5_ACCEL_TLS_LRO = BIT(4), + MLX5_ACCEL_TLS_IPV6 = BIT(5), + MLX5_ACCEL_TLS_AES_GCM128 = BIT(30), + MLX5_ACCEL_TLS_AES_GCM256 = BIT(31), +}; + +struct mlx5_ifc_tls_flow_bits { + u8 src_port[0x10]; + u8 dst_port[0x10]; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; + u8 ipv6[0x1]; + u8 direction_sx[0x1]; + u8 reserved_at_2[0x1e]; +}; + +int mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid); +void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid); +bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev); +u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev); +int mlx5_accel_tls_init(struct mlx5_core_dev *mdev); +void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev); + +#else + +static inline int +mlx5_accel_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) { return 0; } +static inline void mlx5_accel_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid) { } +static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; } +static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; } +static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; } +static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { } + +#endif + +#endif /* __MLX5_ACCEL_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 21cd1703a862..487388aed98f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -135,6 +135,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) return cmd->cmd_buf + (idx << cmd->log_stride); } +static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) +{ + int size = msg->len; + int blen = size - min_t(int, sizeof(msg->first.data), size); + + return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE); +} + static u8 xor8_buf(void *buf, size_t offset, int len) { u8 *ptr = buf; @@ -174,10 +182,7 @@ static void calc_block_sig(struct mlx5_cmd_prot_block *block) static void calc_chain_sig(struct mlx5_cmd_msg *msg) { struct mlx5_cmd_mailbox *next = msg->next; - int size = msg->len; - int blen = size - min_t(int, sizeof(msg->first.data), size); - int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) - / MLX5_CMD_DATA_BLOCK_SIZE; + int n = mlx5_calc_cmd_blocks(msg); int i = 0; for (i = 0; i < n && next; i++) { @@ -220,12 +225,9 @@ static void free_cmd(struct mlx5_cmd_work_ent *ent) static int verify_signature(struct mlx5_cmd_work_ent *ent) { struct mlx5_cmd_mailbox *next = ent->out->next; + int n = mlx5_calc_cmd_blocks(ent->out); int err; u8 sig; - int size = ent->out->len; - int blen = size - min_t(int, sizeof(ent->out->first.data), size); - int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) - / MLX5_CMD_DATA_BLOCK_SIZE; int i = 0; sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); @@ -720,9 +722,11 @@ static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg = input ? 
ent->in : ent->out; u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; + int n = mlx5_calc_cmd_blocks(msg); int data_only; u32 offset = 0; int dump_len; + int i; data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); @@ -749,7 +753,7 @@ static void dump_command(struct mlx5_core_dev *dev, offset += sizeof(*ent->lay); } - while (next && offset < msg->len) { + for (i = 0; i < n && next; i++) { if (data_only) { dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); dump_buf(next->buf, dump_len, 1, offset); @@ -1137,7 +1141,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *tmp, *head = NULL; struct mlx5_cmd_prot_block *block; struct mlx5_cmd_msg *msg; - int blen; int err; int n; int i; @@ -1146,8 +1149,8 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, if (!msg) return ERR_PTR(-ENOMEM); - blen = size - min_t(int, sizeof(msg->first.data), size); - n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE; + msg->len = size; + n = mlx5_calc_cmd_blocks(msg); for (i = 0; i < n; i++) { tmp = alloc_cmd_box(dev, flags); @@ -1165,7 +1168,6 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, head = tmp; } msg->next = head; - msg->len = size; return msg; err_alloc: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index d93ff567b40d..b3820a34e773 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -235,7 +235,7 @@ const char *parse_fs_dst(struct trace_seq *p, switch (dst->type) { case MLX5_FLOW_DESTINATION_TYPE_VPORT: - trace_seq_printf(p, "vport=%u\n", dst->vport_num); + trace_seq_printf(p, "vport=%u\n", dst->vport.num); break; case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: trace_seq_printf(p, "ft=%p\n", dst->ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 30cad07be2b5..eb9eb7aa953a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -53,6 +53,11 @@ #include "mlx5_core.h" #include "en_stats.h" +struct page_pool; + +#define MLX5E_METADATA_ETHER_TYPE (0x8CE4) +#define MLX5E_METADATA_ETHER_LEN 8 + #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) @@ -60,6 +65,7 @@ #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) +#define MLX5E_MAX_PRIORITY 8 #define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 @@ -95,18 +101,22 @@ (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \ (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU)) +#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM)) +#define MLX5E_LOG_MAX_RX_WQE_BULK \ + (ilog2(PAGE_SIZE / roundup_pow_of_two(MLX5E_MIN_SKB_FRAG_SZ))) + #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd -#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x1 +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK) #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \ MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW) #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD 
(256) +#define MLX5E_RX_MAX_HEAD (256) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 @@ -177,11 +187,16 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; }; -struct mlx5e_rx_wqe { +struct mlx5e_rx_wqe_ll { struct mlx5_wqe_srq_next_seg next; - struct mlx5_wqe_data_seg data; + struct mlx5_wqe_data_seg data[0]; +}; + +struct mlx5e_rx_wqe_cyc { + struct mlx5_wqe_data_seg data[0]; }; struct mlx5e_umr_wqe { @@ -239,6 +254,7 @@ struct mlx5e_params { bool vlan_strip_disable; bool scatter_fcs_en; bool rx_dim_enabled; + bool tx_dim_enabled; u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; @@ -269,6 +285,11 @@ struct mlx5e_dcbx { /* The only setting that cannot be read from FW */ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; u8 cap; + + /* Buffer configuration */ + bool manual_buffer; + u32 cable_len; + u32 xoff; }; struct mlx5e_dcbx_dp { @@ -282,8 +303,6 @@ enum { MLX5E_RQ_STATE_AM, }; -#define MLX5E_TEST_BIT(state, nr) (state & BIT(nr)) - struct mlx5e_cq { /* data path - accessed per cqe */ struct mlx5_cqwq wq; @@ -303,7 +322,7 @@ struct mlx5e_cq { /* control */ struct mlx5_core_dev *mdev; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; struct mlx5e_tx_wqe_info { @@ -328,6 +347,8 @@ enum { MLX5E_SQ_STATE_ENABLED, MLX5E_SQ_STATE_RECOVERING, MLX5E_SQ_STATE_IPSEC, + MLX5E_SQ_STATE_AM, + MLX5E_SQ_STATE_TLS, }; struct mlx5e_sq_wqe_info { @@ -340,11 +361,11 @@ struct mlx5e_txqsq { /* dirtied @completion */ u16 cc; u32 dma_fifo_cc; + struct net_dim dim; /* Adaptive Moderation */ /* dirtied @xmit */ u16 pc ____cacheline_aligned_in_smp; u32 dma_fifo_pc; - struct mlx5e_sq_stats stats; struct mlx5e_cq cq; @@ -357,11 +378,11 @@ struct mlx5e_txqsq { /* read only */ struct mlx5_wq_cyc wq; u32 dma_fifo_mask; + struct mlx5e_sq_stats *stats; void __iomem *uar_map; struct netdev_queue *txq; u32 sqn; u8 min_inline_mode; - u16 edge; struct device *pdev; __be32 mkey_be; unsigned long state; @@ -392,6 +413,7 @@ struct mlx5e_xdpsq { struct { struct mlx5e_dma_info *di; bool doorbell; + bool redirect_flush; } db; /* read only */ @@ -425,7 +447,6 @@ struct mlx5e_icosq { struct mlx5_wq_cyc wq; void __iomem *uar_map; u32 sqn; - u16 edge; unsigned long state; /* control path */ @@ -436,7 +457,7 @@ struct mlx5e_icosq { static inline bool mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) { - return (((wq->sz_m1 & (cc - pc)) >= n) || (cc == pc)); + return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); } struct mlx5e_dma_info { @@ -445,8 +466,9 @@ }; struct mlx5e_wqe_frag_info { - struct mlx5e_dma_info di; + struct mlx5e_dma_info *di; u32 offset; + bool last_in_page; }; struct mlx5e_umr_dma_info { @@ -459,6 +481,8 @@ struct mlx5e_mpw_info { DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); }; +#define MLX5E_MAX_RX_FRAGS 4 + /* a single cache unit can serve one napi call (for non-striding rq) * or a MPWQE (for striding rq).
*/ @@ -476,6 +500,9 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*); typedef struct sk_buff * (*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +typedef struct sk_buff * +(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); @@ -483,19 +510,30 @@ enum mlx5e_rq_flag { MLX5E_RQ_FLAG_XDP_XMIT = BIT(0), }; +struct mlx5e_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct mlx5e_rq_frags_info { + struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; +}; + struct mlx5e_rq { /* data path */ - struct mlx5_wq_ll wq; - union { struct { - struct mlx5e_wqe_frag_info *frag_info; - u32 frag_sz; /* max possible skb frag_sz */ - union { - bool page_reuse; - }; + struct mlx5_wq_cyc wq; + struct mlx5e_wqe_frag_info *frags; + struct mlx5e_dma_info *di; + struct mlx5e_rq_frags_info info; + mlx5e_fp_skb_from_cqe skb_from_cqe; } wqe; struct { + struct mlx5_wq_ll wq; struct mlx5e_umr_wqe umr_wqe; struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; @@ -506,14 +544,13 @@ struct mlx5e_rq { }; struct { u16 headroom; - u8 page_order; u8 map_dir; /* dma map direction */ } buff; struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; - struct mlx5e_rq_stats stats; + struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; @@ -533,6 +570,7 @@ struct mlx5e_rq { unsigned int hw_mtu; struct mlx5e_xdpsq xdpsq; DECLARE_BITMAP(flags, 8); + struct page_pool *page_pool; /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -560,7 +598,7 @@ struct mlx5e_channel { /* data path - accessed per napi poll */ struct irq_desc *irq_desc; - struct mlx5e_ch_stats stats; + struct mlx5e_ch_stats *stats; /* control */ struct mlx5e_priv *priv; @@ -576,6 +614,12 @@ struct mlx5e_channels { struct mlx5e_params params; }; +struct mlx5e_channel_stats { + struct mlx5e_ch_stats ch; + struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_rq_stats rq; +} ____cacheline_aligned_in_smp; + enum mlx5e_traffic_types { MLX5E_TT_IPV4_TCP, MLX5E_TT_IPV6_TCP, @@ -625,7 +669,6 @@ struct mlx5e_flow_table { struct mlx5e_tc_table { struct mlx5_flow_table *t; - struct rhashtable_params ht_params; struct rhashtable ht; DECLARE_HASHTABLE(mod_hdr_tbl, 8); @@ -778,6 +821,8 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; + struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS]; + u8 max_opened_tc; struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; @@ -790,6 +835,9 @@ struct mlx5e_priv { #ifdef CONFIG_MLX5_EN_IPSEC struct mlx5e_ipsec *ipsec; #endif +#ifdef CONFIG_MLX5_EN_TLS + struct mlx5e_tls *tls; +#endif }; struct mlx5e_profile { @@ -820,6 +868,8 @@ void mlx5e_build_ptys2ethtool_map(void); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe *wqe, u16 pi); void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); @@ -849,6 +899,12 @@ 
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx); +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); void mlx5e_update_stats(struct mlx5e_priv *priv); @@ -919,8 +975,6 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); -int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, @@ -935,10 +989,21 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); } +static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(*wqe, 0, sizeof(**wqe)); +} + static inline struct mlx5e_tx_wqe *mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) { - u16 pi = *pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; @@ -1067,6 +1132,10 @@ void mlx5e_update_stats_work(struct work_struct *work); int mlx5e_bits_invert(unsigned long a, int size); +typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb); + /* ethtool helpers */ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); @@ -1092,9 +1161,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); - /* mlx5e generic netdev management API */ struct net_device* mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, @@ -1107,4 +1173,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, u16 max_channels, u16 mtu); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); +void mlx5e_tx_dim_work(struct work_struct *work); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile new file mode 100644 index 000000000000..d8e17110f25d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/Makefile @@ -0,0 +1 @@ +subdir-ccflags-y += -I$(src)/.. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c new file mode 100644 index 000000000000..24e3b564964f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "port.h" + +/* speed in units of 1Mb */ +static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = 1000, + [MLX5E_1000BASE_KX] = 1000, + [MLX5E_10GBASE_CX4] = 10000, + [MLX5E_10GBASE_KX4] = 10000, + [MLX5E_10GBASE_KR] = 10000, + [MLX5E_20GBASE_KR2] = 20000, + [MLX5E_40GBASE_CR4] = 40000, + [MLX5E_40GBASE_KR4] = 40000, + [MLX5E_56GBASE_R4] = 56000, + [MLX5E_10GBASE_CR] = 10000, + [MLX5E_10GBASE_SR] = 10000, + [MLX5E_10GBASE_ER] = 10000, + [MLX5E_40GBASE_SR4] = 40000, + [MLX5E_40GBASE_LR4] = 40000, + [MLX5E_50GBASE_SR2] = 50000, + [MLX5E_100GBASE_CR4] = 100000, + [MLX5E_100GBASE_SR4] = 100000, + [MLX5E_100GBASE_KR4] = 100000, + [MLX5E_100GBASE_LR4] = 100000, + [MLX5E_100BASE_TX] = 100, + [MLX5E_1000BASE_T] = 1000, + [MLX5E_10GBASE_T] = 10000, + [MLX5E_25GBASE_CR] = 25000, + [MLX5E_25GBASE_KR] = 25000, + [MLX5E_25GBASE_SR] = 25000, + [MLX5E_50GBASE_CR2] = 50000, + [MLX5E_50GBASE_KR2] = 50000, +}; + +u32 mlx5e_port_ptys2speed(u32 eth_proto_oper) +{ + unsigned long temp = eth_proto_oper; + u32 speed = 0; + int i; + + i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER); + if (i < MLX5E_LINK_MODES_NUMBER) + speed = mlx5e_link_speed[i]; + + return speed; +} + +int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; + u32 eth_proto_oper; + int err; + + err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + if (err) + return err; + + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + *speed = mlx5e_port_ptys2speed(eth_proto_oper); + if (!(*speed)) { + mlx5_core_warn(mdev, "cannot get port speed\n"); + err = -EINVAL; + } + + return err; +} + +int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 max_speed = 0; + u32 proto_cap; + int err; + int i; + + err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + if (err) + return err; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) + if (proto_cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, mlx5e_link_speed[i]); + + *speed = max_speed; + return 0; +} + +u32 mlx5e_port_speed2linkmodes(u32 speed) +{ + u32 link_modes = 0; + int i; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { + if (mlx5e_link_speed[i] == speed) + link_modes |= MLX5E_PROT_MASK(i); + } + + return 
link_modes; +} + +int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out) +{ + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *in; + int err; + + in = kzalloc(sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(pbmc_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 0); + + kfree(in); + return err; +} + +int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in) +{ + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *out; + int err; + + out = kzalloc(sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + MLX5_SET(pbmc_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PBMC, 0, 1); + + kfree(out); + return err; +} + +/* buffer[i]: buffer that priority i is mapped to */ +int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) +{ + int sz = MLX5_ST_SZ_BYTES(pptb_reg); + u32 prio_x_buff; + void *out; + void *in; + int prio; + int err; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(pptb_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0); + if (err) + goto out; + + prio_x_buff = MLX5_GET(pptb_reg, out, prio_x_buff); + for (prio = 0; prio < 8; prio++) { + buffer[prio] = (u8)(prio_x_buff >> (4 * prio)) & 0xF; + mlx5_core_dbg(mdev, "prio %d, buffer %d\n", prio, buffer[prio]); + } +out: + kfree(in); + kfree(out); + return err; +} + +int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) +{ + int sz = MLX5_ST_SZ_BYTES(pptb_reg); + u32 prio_x_buff; + void *out; + void *in; + int prio; + int err; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + /* First query the pptb register */ + MLX5_SET(pptb_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 0); + if (err) + goto out; + + memcpy(in, out, sz); + MLX5_SET(pptb_reg, in, local_port, 1); + + /* Update the pm and prio_x_buff */ + MLX5_SET(pptb_reg, in, pm, 0xFF); + + prio_x_buff = 0; + for (prio = 0; prio < 8; prio++) + prio_x_buff |= (buffer[prio] << (4 * prio)); + MLX5_SET(pptb_reg, in, prio_x_buff, prio_x_buff); + + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPTB, 0, 1); + +out: + kfree(in); + kfree(out); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h new file mode 100644 index 000000000000..f8cbd8194179 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5E_EN_PORT_H +#define __MLX5E_EN_PORT_H + +#include <linux/mlx5/driver.h> +#include "en.h" + +u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); +int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +u32 mlx5e_port_speed2linkmodes(u32 speed); + +int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); +int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); +int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); +int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c new file mode 100644 index 000000000000..c047da8752da --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
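The port_buffer.c code that follows derives the lossless xoff threshold from the formula xoff = (301 + 2.16 * len[m]) * speed + 2.72 * MTU, carried out in integers by scaling the constants by 100. A worked stand-alone sketch with example inputs (7 m cable, 100 Gbps link, 1500-byte MTU; these are illustrative values, not driver defaults):

#include <stdio.h>

/* Scaled-integer form of calculate_xoff(): 2.16 -> 216/100,
 * 2.72 -> 272/100; speed is in Mbps as reported by the PTYS helpers. */
static unsigned int calc_xoff(unsigned int cable_len_m,
			      unsigned int speed_mbps, unsigned int mtu)
{
	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	/* (301 + 15) * 100000 / 1000 + 272 * 1500 / 100 = 31600 + 4080 */
	printf("xoff = %u bytes\n", calc_xoff(7, 100000, 1500));
	return 0;
}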
+ */ +#include "port_buffer.h" + +int mlx5e_port_query_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + u32 total_used = 0; + void *buffer; + void *out; + int err; + int i; + + out = kzalloc(sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5e_port_query_pbmc(mdev, out); + if (err) + goto out; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]); + port_buffer->buffer[i].lossy = + MLX5_GET(bufferx_reg, buffer, lossy); + port_buffer->buffer[i].epsb = + MLX5_GET(bufferx_reg, buffer, epsb); + port_buffer->buffer[i].size = + MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->buffer[i].xon = + MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->buffer[i].xoff = + MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT; + total_used += port_buffer->buffer[i].size; + + mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i, + port_buffer->buffer[i].size, + port_buffer->buffer[i].xon, + port_buffer->buffer[i].xoff, + port_buffer->buffer[i].epsb, + port_buffer->buffer[i].lossy); + } + + port_buffer->port_buffer_size = + MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT; + port_buffer->spare_buffer_size = + port_buffer->port_buffer_size - total_used; + + mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n", + port_buffer->port_buffer_size, + port_buffer->spare_buffer_size); +out: + kfree(out); + return err; +} + +static int port_set_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(pbmc_reg); + void *buffer; + void *in; + int err; + int i; + + in = kzalloc(sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + err = mlx5e_port_query_pbmc(mdev, in); + if (err) + goto out; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]); + + MLX5_SET(bufferx_reg, buffer, size, + port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT); + MLX5_SET(bufferx_reg, buffer, lossy, + port_buffer->buffer[i].lossy); + MLX5_SET(bufferx_reg, buffer, xoff_threshold, + port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT); + MLX5_SET(bufferx_reg, buffer, xon_threshold, + port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT); + } + + err = mlx5e_port_set_pbmc(mdev, in); +out: + kfree(in); + return err; +} + +/* xoff = (301 + 2.16 * len [m]) * speed [Gbps] + 2.72 * MTU [B] */ +static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) +{ + u32 speed; + u32 xoff; + int err; + + err = mlx5e_port_linkspeed(priv->mdev, &speed); + if (err) + return 0; + + xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; + + mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff); + return xoff; +} + +static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, + u32 xoff, unsigned int mtu) +{ + int i; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + if (port_buffer->buffer[i].lossy) { + port_buffer->buffer[i].xoff = 0; + port_buffer->buffer[i].xon = 0; + continue; + } + + if (port_buffer->buffer[i].size < + (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) + return -ENOMEM; + + port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; + port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; + } + + return 0; +} + +/** + * update_buffer_lossy() + * mtu:
<input> device's MTU + * pfc_en: <input> current pfc configuration + * buffer: <input> current prio to buffer mapping + * xoff: <input> xoff value + * port_buffer: <output> port receive buffer configuration + * change: <output> whether the buffer configuration was modified + * + * Update buffer configuration based on pfc configuration and priority + * to buffer mapping. + * Buffer's lossy bit is changed to: + * lossless if there is at least one PFC enabled priority mapped to this buffer + * lossy if all priorities mapped to this buffer are PFC disabled + * + * Return: + * Return 0 if no error. + * Set change to true if buffer configuration is modified. + */ +static int update_buffer_lossy(unsigned int mtu, + u8 pfc_en, u8 *buffer, u32 xoff, + struct mlx5e_port_buffer *port_buffer, + bool *change) +{ + bool changed = false; + u8 lossy_count; + u8 prio_count; + u8 lossy; + int prio; + int err; + int i; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + prio_count = 0; + lossy_count = 0; + + for (prio = 0; prio < MLX5E_MAX_PRIORITY; prio++) { + if (buffer[prio] != i) + continue; + + prio_count++; + lossy_count += !(pfc_en & (1 << prio)); + } + + if (lossy_count == prio_count) + lossy = 1; + else /* lossy_count < prio_count */ + lossy = 0; + + if (lossy != port_buffer->buffer[i].lossy) { + port_buffer->buffer[i].lossy = lossy; + changed = true; + } + } + + if (changed) { + err = update_xoff_threshold(port_buffer, xoff, mtu); + if (err) + return err; + + *change = true; + } + + return 0; +} + +int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + struct ieee_pfc *pfc, + u32 *buffer_size, + u8 *prio2buffer) +{ + struct mlx5e_port_buffer port_buffer; + u32 xoff = calculate_xoff(priv, mtu); + bool update_prio2buffer = false; + u8 buffer[MLX5E_MAX_PRIORITY]; + bool update_buffer = false; + u32 total_used = 0; + u8 curr_pfc_en; + int err; + int i; + + mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { + update_buffer = true; + err = update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_PFC) { + err = mlx5e_port_query_priority2buffer(priv->mdev, buffer); + if (err) + return err; + + err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, + &port_buffer, &update_buffer); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { + update_prio2buffer = true; + err = mlx5_query_port_pfc(priv->mdev, &curr_pfc_en, NULL); + if (err) + return err; + + err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, + &port_buffer, &update_buffer); + if (err) + return err; + } + + if (change & MLX5E_PORT_BUFFER_SIZE) { + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]); + if (!port_buffer.buffer[i].lossy && !buffer_size[i]) { + mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n", + __func__, i); + return -EINVAL; + } + + port_buffer.buffer[i].size = buffer_size[i]; + total_used += buffer_size[i]; + } + + mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used); + + if (total_used > port_buffer.port_buffer_size) + return -EINVAL; + + update_buffer = true; + err = update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + + /* Need to update buffer configuration if xoff value is changed */ + if (!update_buffer && xoff != priv->dcbx.xoff) { + update_buffer = true; + err =
update_xoff_threshold(&port_buffer, xoff, mtu); + if (err) + return err; + } + priv->dcbx.xoff = xoff; + + /* Apply the settings */ + if (update_buffer) { + err = port_set_buffer(priv, &port_buffer); + if (err) + return err; + } + + if (update_prio2buffer) + err = mlx5e_port_set_priority2buffer(priv->mdev, prio2buffer); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h new file mode 100644 index 000000000000..34f55b81a0de --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __MLX5_EN_PORT_BUFFER_H__ +#define __MLX5_EN_PORT_BUFFER_H__ + +#include "en.h" +#include "port.h" + +#define MLX5E_MAX_BUFFER 8 +#define MLX5E_BUFFER_CELL_SHIFT 7 +#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */ + +#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \ + MLX5_CAP_PCAM_REG(mdev, pbmc) && \ + MLX5_CAP_PCAM_REG(mdev, pptb)) + +enum { + MLX5E_PORT_BUFFER_CABLE_LEN = BIT(0), + MLX5E_PORT_BUFFER_PFC = BIT(1), + MLX5E_PORT_BUFFER_PRIO2BUFFER = BIT(2), + MLX5E_PORT_BUFFER_SIZE = BIT(3), +}; + +struct mlx5e_bufferx_reg { + u8 lossy; + u8 epsb; + u32 size; + u32 xoff; + u32 xon; +}; + +struct mlx5e_port_buffer { + u32 port_buffer_size; + u32 spare_buffer_size; + struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; +}; + +int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, + u32 change, unsigned int mtu, + struct ieee_pfc *pfc, + u32 *buffer_size, + u8 *prio2buffer); + +int mlx5e_port_query_buffer(struct mlx5e_priv *priv, + struct mlx5e_port_buffer *port_buffer); +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h new file mode 100644 index 000000000000..f20074dbef32 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5E_EN_ACCEL_H__ +#define __MLX5E_EN_ACCEL_H__ + +#ifdef CONFIG_MLX5_ACCEL + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls_rxtx.h" +#include "en.h" + +static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, + struct mlx5e_txqsq *sq, + struct net_device *dev, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ +#ifdef CONFIG_MLX5_EN_TLS + if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { + skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi); + if (unlikely(!skb)) + return NULL; + } +#endif + +#ifdef CONFIG_MLX5_EN_IPSEC + if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) { + skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb); + if (unlikely(!skb)) + return NULL; + } +#endif + + return skb; +} + +#endif /* CONFIG_MLX5_ACCEL */ + +#endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 1198fc1eba4c..93bf10e6508c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -45,9 +45,6 @@ #define MLX5E_IPSEC_SADB_RX_BITS 10 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L -#define MLX5E_METADATA_ETHER_TYPE (0x8CE4) -#define MLX5E_METADATA_ETHER_LEN 8 - struct mlx5e_priv; struct mlx5e_ipsec_sw_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c new file mode 100644 index 000000000000..d167845271c3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/netdevice.h> +#include <net/ipv6.h> +#include "en_accel/tls.h" +#include "accel/tls.h" + +static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) +{ + struct inet_sock *inet = inet_sk(sk); + + MLX5_SET(tls_flow, flow, ipv6, 0); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), + &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4)); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + MLX5_SET(tls_flow, flow, ipv6, 1); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), + &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); +} +#endif + +static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) +{ + struct inet_sock *inet = inet_sk(sk); + + memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, + MLX5_FLD_SZ_BYTES(tls_flow, src_port)); + memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport, + MLX5_FLD_SZ_BYTES(tls_flow, dst_port)); +} + +static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps) +{ + switch (sk->sk_family) { + case AF_INET: + mlx5e_tls_set_ipv4_flow(flow, sk); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + if (!sk->sk_ipv6only && + ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { + mlx5e_tls_set_ipv4_flow(flow, sk); + break; + } + if (!(caps & MLX5_ACCEL_TLS_IPV6)) + goto error_out; + + mlx5e_tls_set_ipv6_flow(flow, sk); + break; +#endif + default: + goto error_out; + } + + mlx5e_tls_set_flow_tcp_ports(flow, sk); + return 0; +error_out: + return -EINVAL; +} + +static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct mlx5_core_dev *mdev = priv->mdev; + u32 caps = mlx5_accel_tls_device_caps(mdev); + int ret = -ENOMEM; + void 
*flow; + + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + return -EINVAL; + + flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL); + if (!flow) + return ret; + + ret = mlx5e_tls_set_flow(flow, sk, caps); + if (ret) + goto free_flow; + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + struct mlx5e_tls_offload_context *tx_ctx = + mlx5e_get_tls_tx_context(tls_ctx); + u32 swid; + + ret = mlx5_accel_tls_add_tx_flow(mdev, flow, crypto_info, + start_offload_tcp_sn, &swid); + if (ret < 0) + goto free_flow; + + tx_ctx->swid = htonl(swid); + tx_ctx->expected_seq = start_offload_tcp_sn; + } + + return 0; +free_flow: + kfree(flow); + return ret; +} + +static void mlx5e_tls_del(struct net_device *netdev, + struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + u32 swid = ntohl(mlx5e_get_tls_tx_context(tls_ctx)->swid); + + mlx5_accel_tls_del_tx_flow(priv->mdev, swid); + } else { + netdev_err(netdev, "unsupported direction %d\n", direction); + } +} + +static const struct tlsdev_ops mlx5e_tls_ops = { + .tls_dev_add = mlx5e_tls_add, + .tls_dev_del = mlx5e_tls_del, +}; + +void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + + if (!mlx5_accel_is_tls_device(priv->mdev)) + return; + + netdev->features |= NETIF_F_HW_TLS_TX; + netdev->hw_features |= NETIF_F_HW_TLS_TX; + netdev->tlsdev_ops = &mlx5e_tls_ops; +} + +int mlx5e_tls_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL); + + if (!tls) + return -ENOMEM; + + priv->tls = tls; + return 0; +} + +void mlx5e_tls_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tls *tls = priv->tls; + + if (!tls) + return; + + kfree(tls); + priv->tls = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h new file mode 100644 index 000000000000..b6162178f621 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ +#ifndef __MLX5E_TLS_H__ +#define __MLX5E_TLS_H__ + +#ifdef CONFIG_MLX5_EN_TLS + +#include <net/tls.h> +#include "en.h" + +struct mlx5e_tls_sw_stats { + atomic64_t tx_tls_drop_metadata; + atomic64_t tx_tls_drop_resync_alloc; + atomic64_t tx_tls_drop_no_sync_data; + atomic64_t tx_tls_drop_bypass_required; +}; + +struct mlx5e_tls { + struct mlx5e_tls_sw_stats sw_stats; +}; + +struct mlx5e_tls_offload_context { + struct tls_offload_context base; + u32 expected_seq; + __be32 swid; +}; + +static inline struct mlx5e_tls_offload_context * +mlx5e_get_tls_tx_context(struct tls_context *tls_ctx) +{ + BUILD_BUG_ON(sizeof(struct mlx5e_tls_offload_context) > + TLS_OFFLOAD_CONTEXT_SIZE); + return container_of(tls_offload_ctx(tls_ctx), + struct mlx5e_tls_offload_context, + base); +} + +void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); +int mlx5e_tls_init(struct mlx5e_priv *priv); +void mlx5e_tls_cleanup(struct mlx5e_priv *priv); + +int mlx5e_tls_get_count(struct mlx5e_priv *priv); +int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); +int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); + +#else + +static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) { } +static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } +static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } +static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } +static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } + +#endif + +#endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c new file mode 100644 index 000000000000..15aef71d1957 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include "en_accel/tls.h" +#include "en_accel/tls_rxtx.h" + +#define SYNDROME_OFFLOAD_REQUIRED 32 +#define SYNDROME_SYNC 33 + +struct sync_info { + u64 rcd_sn; + s32 sync_len; + int nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; +}; + +struct mlx5e_tls_metadata { + /* One byte of syndrome followed by 3 bytes of swid */ + __be32 syndrome_swid; + __be16 first_seq; + /* packet type ID field */ + __be16 ethertype; +} __packed; + +static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid) +{ + struct mlx5e_tls_metadata *pet; + struct ethhdr *eth; + + if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata))) + return -ENOMEM; + + eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata)); + skb->mac_header -= sizeof(struct mlx5e_tls_metadata); + pet = (struct mlx5e_tls_metadata *)(eth + 1); + + memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata), + 2 * ETH_ALEN); + + eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE); + pet->syndrome_swid = htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid; + + return 0; +} + +static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context *context, + u32 tcp_seq, struct sync_info *info) +{ + int remaining, i = 0, ret = -EINVAL; + struct tls_record_info *record; + unsigned long flags; + s32 sync_size; + + spin_lock_irqsave(&context->base.lock, flags); + record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn); + + if (unlikely(!record)) + goto out; + + sync_size = tcp_seq - tls_record_start_seq(record); + info->sync_len = sync_size; + if (unlikely(sync_size < 0)) { + if (tls_record_is_start_marker(record)) + goto done; + + goto out; + } + + remaining = sync_size; + while (remaining > 0) { + info->frags[i] = record->frags[i]; + __skb_frag_ref(&info->frags[i]); + remaining -= skb_frag_size(&info->frags[i]); + + if (remaining < 0) + skb_frag_size_add(&info->frags[i], remaining); + + i++; + } + info->nr_frags = i; +done: + ret = 0; +out: + spin_unlock_irqrestore(&context->base.lock, flags); + return ret; +} + +static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb, + struct sk_buff *nskb, u32 tcp_seq, + int headln, __be64 rcd_sn) +{ + struct mlx5e_tls_metadata *pet; + u8 syndrome = SYNDROME_SYNC; + struct iphdr *iph; + struct tcphdr *th; + int data_len, mss; + + nskb->dev = skb->dev; + skb_reset_mac_header(nskb); + skb_set_network_header(nskb, skb_network_offset(skb)); + skb_set_transport_header(nskb, skb_transport_offset(skb)); + memcpy(nskb->data, skb->data, headln); + memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn)); + + iph = ip_hdr(nskb); + iph->tot_len = htons(nskb->len - skb_network_offset(nskb)); + th = tcp_hdr(nskb); + data_len = nskb->len - headln; + tcp_seq -= data_len; + th->seq = htonl(tcp_seq); + + mss = nskb->dev->mtu - (headln - skb_network_offset(nskb)); + skb_shinfo(nskb)->gso_size = 0; + if (data_len > mss) { + skb_shinfo(nskb)->gso_size = mss; + skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss); + } + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + + pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr)); + memcpy(pet, &syndrome, sizeof(syndrome)); + pet->first_seq = htons(tcp_seq); + + /* MLX5 devices don't care about the checksum partial start, offset + * and pseudo header + */ + nskb->ip_summed = CHECKSUM_PARTIAL; + + nskb->xmit_more = 1; + nskb->queue_mapping = skb->queue_mapping; +} + +static struct sk_buff * +mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context, + struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe 
**wqe, + u16 *pi, + struct mlx5e_tls *tls) +{ + u32 tcp_seq = ntohl(tcp_hdr(skb)->seq); + struct sync_info info; + struct sk_buff *nskb; + int linear_len = 0; + int headln; + int i; + + sq->stats->tls_ooo++; + + if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) { + /* We might get here if a retransmission reaches the driver + * after the relevant record is acked. + * It should be safe to drop the packet in this case + */ + atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data); + goto err_out; + } + + if (unlikely(info.sync_len < 0)) { + u32 payload; + + headln = skb_transport_offset(skb) + tcp_hdrlen(skb); + payload = skb->len - headln; + if (likely(payload <= -info.sync_len)) + /* SKB payload doesn't require offload + */ + return skb; + + atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required); + goto err_out; + } + + if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { + atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata); + goto err_out; + } + + headln = skb_transport_offset(skb) + tcp_hdrlen(skb); + linear_len += headln + sizeof(info.rcd_sn); + nskb = alloc_skb(linear_len, GFP_ATOMIC); + if (unlikely(!nskb)) { + atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc); + goto err_out; + } + + context->expected_seq = tcp_seq + skb->len - headln; + skb_put(nskb, linear_len); + for (i = 0; i < info.nr_frags; i++) + skb_shinfo(nskb)->frags[i] = info.frags[i]; + + skb_shinfo(nskb)->nr_frags = info.nr_frags; + nskb->data_len = info.sync_len; + nskb->len += info.sync_len; + sq->stats->tls_resync_bytes += nskb->len; + mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln, + cpu_to_be64(info.rcd_sn)); + mlx5e_sq_xmit(sq, nskb, *wqe, *pi); + mlx5e_sq_fetch_wqe(sq, wqe, pi); + return skb; + +err_out: + dev_kfree_skb_any(skb); + return NULL; +} + +struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_tls_offload_context *context; + struct tls_context *tls_ctx; + u32 expected_seq; + int datalen; + u32 skb_seq; + + if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) + goto out; + + datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (!datalen) + goto out; + + tls_ctx = tls_get_ctx(skb->sk); + if (unlikely(tls_ctx->netdev != netdev)) + goto out; + + skb_seq = ntohl(tcp_hdr(skb)->seq); + context = mlx5e_get_tls_tx_context(tls_ctx); + expected_seq = context->expected_seq; + + if (unlikely(expected_seq != skb_seq)) { + skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls); + goto out; + } + + if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) { + atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata); + dev_kfree_skb_any(skb); + skb = NULL; + goto out; + } + + context->expected_seq = skb_seq + datalen; +out: + return skb; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h new file mode 100644 index 000000000000..405dfd302225 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5E_TLS_RXTX_H__ +#define __MLX5E_TLS_RXTX_H__ + +#ifdef CONFIG_MLX5_EN_TLS + +#include <linux/skbuff.h> +#include "en.h" + +struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev, + struct mlx5e_txqsq *sq, + struct sk_buff *skb, + struct mlx5e_tx_wqe **wqe, + u16 *pi); + +#endif /* CONFIG_MLX5_EN_TLS */ + +#endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c new file mode 100644 index 000000000000..01468ec27446 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include <linux/ethtool.h> +#include <net/sock.h> + +#include "en.h" +#include "accel/tls.h" +#include "fpga/sdk.h" +#include "en_accel/tls.h" + +static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_metadata) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_resync_alloc) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_no_sync_data) }, + { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) }, +}; + +#define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \ + atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset)) + +#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) + +int mlx5e_tls_get_count(struct mlx5e_priv *priv) +{ + if (!priv->tls) + return 0; + + return NUM_TLS_SW_COUNTERS; +} + +int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + unsigned int i, idx = 0; + + if (!priv->tls) + return 0; + + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + mlx5e_tls_sw_stats_desc[i].format); + + return NUM_TLS_SW_COUNTERS; +} + +int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) +{ + int i, idx = 0; + + if (!priv->tls) + return 0; + + for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats, + mlx5e_tls_sw_stats_desc, i); + + return NUM_TLS_SW_COUNTERS; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 610d485c4b03..75e4308ba786 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -213,7 +213,7 @@ out: } #define MLX5E_ARFS_NUM_GROUPS 2 -#define MLX5E_ARFS_GROUP1_SIZE BIT(12) +#define MLX5E_ARFS_GROUP1_SIZE (BIT(16) - 1) #define MLX5E_ARFS_GROUP2_SIZE BIT(0) #define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\ MLX5E_ARFS_GROUP2_SIZE) @@ -565,7 +565,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv, err = mlx5_modify_rule_destination(rule, &dst, NULL); if (err) netdev_warn(priv->netdev, - "Failed to modfiy aRFS rule destination to rq=%d\n", rxq); + "Failed to modify aRFS rule destination to rq=%d\n", rxq); } static void arfs_handle_work(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c641d5656b2d..0a52f31fef37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -32,8 +32,8 @@ #include <linux/device.h> #include <linux/netdevice.h> #include "en.h" - -#define MLX5E_MAX_PRIORITY 8 +#include "en/port.h" +#include "en/port_buffer.h" #define MLX5E_100MB (100000) #define MLX5E_1GB (1000000) @@ -41,6 +41,9 @@ #define MLX5E_CEE_STATE_UP 1 #define MLX5E_CEE_STATE_DOWN 0 +/* Max supported cable length is 1000 meters */ +#define MLX5E_MAX_CABLE_LENGTH 1000 + enum { MLX5E_VENDOR_TC_GROUP_NUM = 7, MLX5E_LOWEST_PRIO_GROUP = 0, @@ -338,6 +341,9 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause); } + if (MLX5_BUFFER_SUPPORTED(mdev)) + pfc->delay = priv->dcbx.cable_len; + return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); } @@ -346,16 +352,39 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + u32 old_cable_len = priv->dcbx.cable_len; + struct ieee_pfc pfc_new; + u32 
changed = 0; u8 curr_pfc_en; - int ret; + int ret = 0; + /* pfc_en */ mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL); + if (pfc->pfc_en != curr_pfc_en) { + ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); + if (ret) + return ret; + mlx5_toggle_port_link(mdev); + changed |= MLX5E_PORT_BUFFER_PFC; + } - if (pfc->pfc_en == curr_pfc_en) - return 0; + if (pfc->delay && + pfc->delay < MLX5E_MAX_CABLE_LENGTH && + pfc->delay != priv->dcbx.cable_len) { + priv->dcbx.cable_len = pfc->delay; + changed |= MLX5E_PORT_BUFFER_CABLE_LEN; + } - ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); - mlx5_toggle_port_link(mdev); + if (MLX5_BUFFER_SUPPORTED(mdev)) { + pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en; + if (priv->dcbx.manual_buffer) + ret = mlx5e_port_manual_buffer_config(priv, changed, + dev->mtu, &pfc_new, + NULL, NULL); + + if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN)) + priv->dcbx.cable_len = old_cable_len; + } if (!ret) { mlx5e_dbg(HW, priv, @@ -873,6 +902,90 @@ static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state) cee_cfg->pfc_enable = state; } +static int mlx5e_dcbnl_getbuffer(struct net_device *dev, + struct dcbnl_buffer *dcb_buffer) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_port_buffer port_buffer; + u8 buffer[MLX5E_MAX_PRIORITY]; + int i, err; + + if (!MLX5_BUFFER_SUPPORTED(mdev)) + return -EOPNOTSUPP; + + err = mlx5e_port_query_priority2buffer(mdev, buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) + dcb_buffer->prio2buffer[i] = buffer[i]; + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) + dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size; + dcb_buffer->total_size = port_buffer.port_buffer_size; + + return 0; +} + +static int mlx5e_dcbnl_setbuffer(struct net_device *dev, + struct dcbnl_buffer *dcb_buffer) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_port_buffer port_buffer; + u8 old_prio2buffer[MLX5E_MAX_PRIORITY]; + u32 *buffer_size = NULL; + u8 *prio2buffer = NULL; + u32 changed = 0; + int i, err; + + if (!MLX5_BUFFER_SUPPORTED(mdev)) + return -EOPNOTSUPP; + + for (i = 0; i < DCBX_MAX_BUFFERS; i++) + mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]); + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) + mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]); + + err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_PRIORITY; i++) { + if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) { + changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER; + prio2buffer = dcb_buffer->prio2buffer; + break; + } + } + + err = mlx5e_port_query_buffer(priv, &port_buffer); + if (err) + return err; + + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { + if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) { + changed |= MLX5E_PORT_BUFFER_SIZE; + buffer_size = dcb_buffer->buffer_size; + break; + } + } + + if (!changed) + return 0; + + priv->dcbx.manual_buffer = true; + err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL, + buffer_size, prio2buffer); + return err; +} + const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, @@ -884,6 +997,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_delapp = mlx5e_dcbnl_ieee_delapp, 
.getdcbx = mlx5e_dcbnl_getdcbx, .setdcbx = mlx5e_dcbnl_setdcbx, + .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer, + .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer, /* CEE interfaces */ .setall = mlx5e_dcbnl_setall, @@ -1091,5 +1206,8 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST) priv->dcbx.cap |= DCB_CAP_DCBX_HOST; + priv->dcbx.manual_buffer = false; + priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN; + mlx5e_ets_init(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 602851ab5b14..d67adf70a97b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -33,16 +33,30 @@ #include <linux/net_dim.h> #include "en.h" +static void +mlx5e_complete_dim_work(struct net_dim *dim, struct net_dim_cq_moder moder, + struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) +{ + mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts); + dim->state = NET_DIM_START_MEASURE; +} + void mlx5e_rx_dim_work(struct work_struct *work) { - struct net_dim *dim = container_of(work, struct net_dim, - work); + struct net_dim *dim = container_of(work, struct net_dim, work); struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); - struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, - dim->profile_ix); + struct net_dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); - mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, - cur_profile.usec, cur_profile.pkts); + mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq); +} - dim->state = NET_DIM_START_MEASURE; +void mlx5e_tx_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim); + struct net_dim_cq_moder cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 37fd0245b6c1..fffe514ba855 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -31,6 +31,7 @@ */ #include "en.h" +#include "en/port.h" void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) @@ -59,18 +60,16 @@ static void mlx5e_get_drvinfo(struct net_device *dev, struct ptys2ethtool_config { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); - u32 speed; }; static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; -#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) 
\ ({ \ struct ptys2ethtool_config *cfg; \ const unsigned int modes[] = { __VA_ARGS__ }; \ unsigned int i; \ cfg = &ptys2ethtool_table[reg_]; \ - cfg->speed = speed_; \ bitmap_zero(cfg->supported, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ bitmap_zero(cfg->advertised, \ @@ -83,55 +82,55 @@ static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; void mlx5e_build_ptys2ethtool_map(void) { - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, 
ETHTOOL_LINK_MODE_10000baseT_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } @@ -389,14 +388,20 @@ static int mlx5e_set_channels(struct net_device *dev, int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { + struct net_dim_cq_moder *rx_moder, *tx_moder; + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -EOPNOTSUPP; - coal->rx_coalesce_usecs = priv->channels.params.rx_cq_moderation.usec; - coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts; - coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec; - coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts; - coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + rx_moder = &priv->channels.params.rx_cq_moderation; + coal->rx_coalesce_usecs = rx_moder->usec; + coal->rx_max_coalesced_frames = rx_moder->pkts; + coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + + tx_moder = &priv->channels.params.tx_cq_moderation; + coal->tx_coalesce_usecs = tx_moder->usec; + coal->tx_max_coalesced_frames = tx_moder->pkts; + coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; return 0; } @@ -438,6 +443,7 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) { + struct net_dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; int err = 0; @@ -463,11 +469,15 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); new_channels.params = priv->channels.params; - new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; - new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; - new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; - new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + rx_moder = &new_channels.params.rx_cq_moderation; + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + + tx_moder = &new_channels.params.tx_cq_moderation; + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; + new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -475,7 +485,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; + reset = 
(!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || + (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); + if (!reset) { mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = new_channels.params; @@ -604,43 +616,24 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings } } -int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) -{ - u32 max_speed = 0; - u32 proto_cap; - int err; - int i; - - err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); - if (err) - return err; - - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) - if (proto_cap & MLX5E_PROT_MASK(i)) - max_speed = max(max_speed, ptys2ethtool_table[i].speed); - - *speed = max_speed; - return 0; -} - static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_link_ksettings *link_ksettings) { - int i; u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; if (!netif_carrier_ok(netdev)) goto out; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_oper & MLX5E_PROT_MASK(i)) { - speed = ptys2ethtool_table[i].speed; - duplex = DUPLEX_FULL; - break; - } + speed = mlx5e_port_ptys2speed(eth_proto_oper); + if (!speed) { + speed = SPEED_UNKNOWN; + goto out; } + + duplex = DUPLEX_FULL; + out: link_ksettings->base.speed = speed; link_ksettings->base.duplex = duplex; @@ -798,18 +791,6 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) return ptys_modes; } -static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) -{ - u32 i, speed_links = 0; - - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].speed == speed) - speed_links |= MLX5E_PROT_MASK(i); - } - - return speed_links; -} - static int mlx5e_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *link_ksettings) { @@ -829,7 +810,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : - mlx5e_ethtool2ptys_speed_link(speed); + mlx5e_port_speed2linkmodes(speed); err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); if (err) { @@ -1534,6 +1515,9 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return -EOPNOTSUPP; if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params)) return -EINVAL; + } else if (priv->channels.params.lro_en) { + netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n"); + return -EINVAL; } new_channels.params = priv->channels.params; @@ -1608,6 +1592,10 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) out: mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. 
*/ + netdev_update_features(netdev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f64dda2bed31..76cc10e44080 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -277,7 +277,6 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, } break; case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: - mlx5e_vport_context_update_vlans(priv); if (priv->fs.vlan.active_cvlans_rule[vid]) { mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); priv->fs.vlan.active_cvlans_rule[vid] = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b29c1d93f058..89c96a0f708e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -35,18 +35,23 @@ #include <linux/mlx5/fs.h> #include <net/vxlan.h> #include <linux/bpf.h> +#include <net/page_pool.h> #include "eswitch.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls.h" #include "accel/ipsec.h" +#include "accel/tls.h" #include "vxlan.h" +#include "en/port.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; struct mlx5_wq_param wq; + struct mlx5e_rq_frags_info frags_info; }; struct mlx5e_sq_param { @@ -89,7 +94,7 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) return true; } -static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) +static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) { if (!params->xdp_prog) { u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); @@ -103,19 +108,27 @@ static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params) static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) { - u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params); return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz); } +static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, + struct mlx5e_params *params) +{ + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); + + return !params->lro_en && frag_sz <= PAGE_SIZE; +} + static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { - u32 frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params); + u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params); s8 signed_log_num_strides_param; u8 log_num_strides; - if (params->lro_en || frag_sz > PAGE_SIZE) + if (!mlx5e_rx_is_linear_skb(mdev, params)) return false; if (MLX5_CAP_GEN(mdev, ext_stride_num_range)) @@ -141,7 +154,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return order_base_2(mlx5e_mpwqe_get_linear_frag_sz(params)); + return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); return MLX5E_MPWQE_STRIDE_SZ(mdev, MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); @@ -159,16 +172,15 @@ static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, { u16 linear_rq_headroom = params->xdp_prog ? XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; + bool is_linear_skb; linear_rq_headroom += NET_IP_ALIGN; - if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST) - return linear_rq_headroom; - - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) - return linear_rq_headroom; + is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ? 
+ mlx5e_rx_is_linear_skb(mdev, params) : + mlx5e_rx_mpwqe_is_linear_skb(mdev, params); - return 0; + return is_linear_skb ? linear_rq_headroom : 0; } void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, @@ -178,14 +190,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, params->log_rq_mtu_frames = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; - switch (params->rq_wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - /* Extra room needed for build_skb */ - params->lro_wqe_sz -= mlx5e_get_rq_headroom(mdev, params) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - } mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, @@ -209,7 +213,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) && MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ? MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : - MLX5_WQ_TYPE_LINKED_LIST; + MLX5_WQ_TYPE_CYCLIC; } static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -323,10 +327,30 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } +static u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_size(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_size(&rq->wqe.wq); + } +} + +static u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return rq->mpwqe.wq.cur_sz; + default: + return rq->wqe.wq.cur_sz; + } +} + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, struct mlx5e_channel *c) { - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info), GFP_KERNEL, cpu_to_node(c->cpu)); @@ -374,7 +398,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq)); + u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey); } @@ -384,31 +408,77 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix) return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT; } +static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) +{ + struct mlx5e_wqe_frag_info next_frag, *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = true; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = true; +} + +static int mlx5e_init_di_list(struct mlx5e_rq *rq, + struct mlx5e_params *params, + int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + 
rq->wqe.di = kvzalloc_node(len * sizeof(*rq->wqe.di), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + mlx5e_init_frags_partition(rq); + + return 0; +} + +static void mlx5e_free_di_list(struct mlx5e_rq *rq) +{ + kvfree(rq->wqe.di); +} + static int mlx5e_alloc_rq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_rq_param *rqp, struct mlx5e_rq *rq) { + struct page_pool_params pp_params = { 0 }; struct mlx5_core_dev *mdev = c->mdev; void *rqc = rqp->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); - u32 byte_count; - int npages; + u32 pool_size; int wq_sz; int err; int i; rqp->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq, - &rq->wq_ctrl); - if (err) - return err; - - rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; - - wq_sz = mlx5_wq_ll_get_size(&rq->wq); - rq->wq_type = params->rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -418,6 +488,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->ix = c->ix; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + rq->stats = &c->priv->channel_stats[c->ix].rq; rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; if (IS_ERR(rq->xdp_prog)) { @@ -432,9 +503,21 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params); + pool_size = 1 << params->log_rq_mtu_frames; switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); + + pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params); + rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; @@ -459,8 +542,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params)); - byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; - err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_wq_destroy; @@ -468,16 +549,31 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, err = mlx5e_rq_alloc_mpwqe_info(rq, c); if (err) - goto err_destroy_umr_mkey; + goto err_free; break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - rq->wqe.frag_info = - kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info), - GFP_KERNEL, cpu_to_node(c->cpu)); - if (!rq->wqe.frag_info) { + default: /* MLX5_WQ_TYPE_CYCLIC */ + err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); + if (err) + return err; + + rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; + + wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); + + rq->wqe.info = rqp->frags_info; + rq->wqe.frags = + kvzalloc_node((wq_sz << rq->wqe.info.log_num_frags) * + sizeof(*rq->wqe.frags), + GFP_KERNEL, cpu_to_node(c->cpu)); + if (!rq->wqe.frags) { err = -ENOMEM; - goto err_rq_wq_destroy; + goto err_free; } + + err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu); + if (err) + goto err_free; rq->post_wqes = mlx5e_post_rx_wqes; rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; @@ -488,41 +584,71 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, #endif rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; if (!rq->handle_rx_cqe) { - kfree(rq->wqe.frag_info); err = -EINVAL; netdev_err(c->netdev, "RX handler of RQ is not 
set, err %d\n", err); - goto err_rq_wq_destroy; + goto err_free; } - byte_count = params->lro_en ? - params->lro_wqe_sz : - MLX5E_SW2HW_MTU(params, params->sw_mtu); -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) - byte_count += MLX5E_METADATA_ETHER_LEN; -#endif - rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en; - - /* calc the required page order */ - rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count); - npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE); - rq->buff.page_order = order_base_2(npages); - - byte_count |= MLX5_HW_START_PADDING; + rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ? + mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; rq->mkey_be = c->mkey_be; } - for (i = 0; i < wq_sz; i++) { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); + /* Create a page_pool and register it with rxq */ + pp_params.order = 0; + pp_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pp_params.pool_size = pool_size; + pp_params.nid = cpu_to_node(c->cpu); + pp_params.dev = c->pdev; + pp_params.dma_dir = rq->buff.map_dir; + + /* page_pool can be used even when there is no rq->xdp_prog, + * given page_pool does not handle DMA mapping there is no + * required state to clear. And page_pool gracefully handle + * elevated refcnt. + */ + rq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rq->page_pool)) { + err = PTR_ERR(rq->page_pool); + rq->page_pool = NULL; + goto err_free; + } + err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, + MEM_TYPE_PAGE_POOL, rq->page_pool); + if (err) + goto err_free; + for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5e_rx_wqe_ll *wqe = + mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); + u32 byte_count = + rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i); - wqe->data.addr = cpu_to_be64(dma_offset + rq->buff.headroom); - } + wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); + wqe->data[0].byte_count = cpu_to_be32(byte_count); + wqe->data[0].lkey = rq->mkey_be; + } else { + struct mlx5e_rx_wqe_cyc *wqe = + mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++) { + u32 frag_size = rq->wqe.info.arr[f].frag_size | + MLX5_HW_START_PADDING; - wqe->data.byte_count = cpu_to_be32(byte_count); - wqe->data.lkey = rq->mkey_be; + wqe->data[f].byte_count = cpu_to_be32(frag_size); + wqe->data[f].lkey = rq->mkey_be; + } + /* check if num_frags is not a pow of two */ + if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { + wqe->data[f].byte_count = 0; + wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); + wqe->data[f].addr = 0; + } + } } INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); @@ -541,13 +667,23 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, return 0; -err_destroy_umr_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); +err_free: + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + kfree(rq->mpwqe.info); + mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); + } err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); + if (rq->page_pool) + page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -561,14 +697,17 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) bpf_prog_put(rq->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); + if (rq->page_pool) + 
page_pool_destroy(rq->page_pool); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kfree(rq->mpwqe.info); mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - kfree(rq->wqe.frag_info); + default: /* MLX5_WQ_TYPE_CYCLIC */ + kvfree(rq->wqe.frags); + mlx5e_free_di_list(rq); } for (i = rq->page_cache.head; i != rq->page_cache.tail; @@ -608,8 +747,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); - mlx5_fill_page_array(&rq->wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); @@ -710,56 +849,58 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq) mlx5_core_destroy_rq(rq->mdev, rq->rqn); } -static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) +static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) { - unsigned long exp_time = jiffies + msecs_to_jiffies(20000); + unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); struct mlx5e_channel *c = rq->channel; - struct mlx5_wq_ll *wq = &rq->wq; - u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq)); + u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); - while (time_before(jiffies, exp_time)) { - if (wq->cur_sz >= min_wqes) + do { + if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes) return 0; msleep(20); - } + } while (time_before(jiffies, exp_time)); + + netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", + c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); - netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", - rq->rqn, wq->cur_sz, min_wqes); return -ETIMEDOUT; } static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe; __be16 wqe_ix_be; u16 wqe_ix; - /* UMR WQE (if in progress) is always at wq->head */ - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && - rq->mpwqe.umr_in_progress) - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); - - while (!mlx5_wq_ll_is_empty(wq)) { - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); - rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, - &wqe->next.next_wqe_index); - } + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) { - /* Clean outstanding pages on handled WQEs that decided to do page-reuse, - * but yet to be re-posted. 
- */ - int wq_sz = mlx5_wq_ll_get_size(&rq->wq); + /* UMR WQE (if in progress) is always at wq->head */ + if (rq->mpwqe.umr_in_progress) + mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); + + while (!mlx5_wq_ll_is_empty(wq)) { + struct mlx5e_rx_wqe_ll *wqe; + + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } + } else { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; - for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++) + while (!mlx5_wq_cyc_is_empty(wq)) { + wqe_ix = mlx5_wq_cyc_get_tail(wq); rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_cyc_pop(wq); + } } + } static int mlx5e_open_rq(struct mlx5e_channel *c, @@ -782,7 +923,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, goto err_destroy_rq; if (params->rx_dim_enabled) - c->rq.state |= BIT(MLX5E_RQ_STATE_AM); + __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); return 0; @@ -797,13 +938,15 @@ err_free_rq: static void mlx5e_activate_rq(struct mlx5e_rq *rq) { struct mlx5e_icosq *sq = &rq->channel->icosq; - u16 pi = sq->pc & sq->wq.sz_m1; + struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *nopwqe; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl); + nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); } static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) @@ -846,6 +989,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -855,10 +999,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, sq->min_inline_mode = params->tx_min_inline_mode; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -901,23 +1045,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c, { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->channel = c; sq->uar_map = mdev->mlx5e_res.bfreg.map; param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS; - return 0; err_sq_wq_destroy: @@ -962,10 +1105,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); struct mlx5_core_dev *mdev = c->mdev; + struct mlx5_wq_cyc *wq = &sq->wq; int err; sq->pdev = c->pdev; @@ -976,21 +1121,25 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->txq_ix = txq_ix; sq->uar_map = mdev->mlx5e_res.bfreg.map; 
sq->min_inline_mode = params->tx_min_inline_mode; + sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); + if (mlx5_accel_is_tls_device(c->priv->mdev)) + set_bit(MLX5E_SQ_STATE_TLS, &sq->state); param->wq.db_numa_node = cpu_to_node(c->cpu); - err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, &sq->wq, &sq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); if (err) return err; - sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; + wq->db = &wq->db[MLX5_SND_DBR]; err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; + INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); + sq->dim.mode = params->tx_cq_moderation.cq_period_mode; return 0; @@ -1051,7 +1200,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); - mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + mlx5_fill_page_frag_array(&csp->wq_ctrl->buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); err = mlx5_core_create_sq(mdev, in, inlen, sqn); @@ -1130,13 +1280,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq) + struct mlx5e_txqsq *sq, + int tc) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; int err; - err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq); + err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc); if (err) return err; @@ -1153,6 +1304,9 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c, if (tx_rate) mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); + if (params->tx_dim_enabled) + sq->state |= BIT(MLX5E_SQ_STATE_AM); + return 0; err_free_txqsq: @@ -1191,6 +1345,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5e_channel *c = sq->channel; + struct mlx5_wq_cyc *wq = &sq->wq; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); /* prevent netif_tx_wake_queue */ @@ -1199,12 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) netif_tx_disable_queue(sq->txq); /* last doorbell out, godspeed .. 
*/ - if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) { + if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *nop; - sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL; - nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); - mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl); + sq->db.wqe_info[pi].skb = NULL; + nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); } } @@ -1319,7 +1475,7 @@ static void mlx5e_sq_recover(struct work_struct *work) return; mlx5e_reset_txqsq_cc_pc(sq); - sq->stats.recover++; + sq->stats->recover++; recover->last_recover = jiffies; mlx5e_activate_txqsq(sq); } @@ -1488,7 +1644,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c, static void mlx5e_free_cq(struct mlx5e_cq *cq) { - mlx5_cqwq_destroy(&cq->wq_ctrl); + mlx5_wq_destroy(&cq->wq_ctrl); } static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) @@ -1504,7 +1660,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) int err; inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * cq->wq_ctrl.frag_buf.npages; + sizeof(u64) * cq->wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -1513,7 +1669,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) memcpy(cqc, param->cqc, sizeof(param->cqc)); - mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf, + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); @@ -1521,7 +1677,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); - MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); @@ -1614,14 +1770,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - int err; - int tc; + struct mlx5e_priv *priv = c->priv; + int err, tc, max_nch = priv->profile->max_nch(priv->mdev); for (tc = 0; tc < params->num_tc; tc++) { - int txq_ix = c->ix + tc * params->num_channels; + int txq_ix = c->ix + tc * max_nch; err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix, - params, &cparam->sq, &c->sq[tc]); + params, &cparam->sq, &c->sq[tc], tc); if (err) goto err_close_sqs; } @@ -1751,6 +1907,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; + c->stats = &priv->channel_stats[ix].ch; mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); c->irq_desc = irq_to_desc(irq); @@ -1864,6 +2021,76 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) kfree(c); } +#define DEFAULT_FRAG_SIZE (2048) + +static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_frags_info *info) +{ + u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu); + int frag_size_max = DEFAULT_FRAG_SIZE; + u32 buf_size = 0; + int i; + +#ifdef CONFIG_MLX5_EN_IPSEC + if (MLX5_IPSEC_DEV(mdev)) + byte_count += MLX5E_METADATA_ETHER_LEN; +#endif + + if (mlx5e_rx_is_linear_skb(mdev, params)) { + int frag_stride; + + frag_stride = 
mlx5e_rx_get_linear_frag_sz(params); + frag_stride = roundup_pow_of_two(frag_stride); + + info->arr[0].frag_size = byte_count; + info->arr[0].frag_stride = frag_stride; + info->num_frags = 1; + info->wqe_bulk = PAGE_SIZE / frag_stride; + goto out; + } + + if (byte_count > PAGE_SIZE + + (MLX5E_MAX_RX_FRAGS - 1) * frag_size_max) + frag_size_max = PAGE_SIZE; + + i = 0; + while (buf_size < byte_count) { + int frag_size = byte_count - buf_size; + + if (i < MLX5E_MAX_RX_FRAGS - 1) + frag_size = min(frag_size, frag_size_max); + + info->arr[i].frag_size = frag_size; + info->arr[i].frag_stride = roundup_pow_of_two(frag_size); + + buf_size += frag_size; + i++; + } + info->num_frags = i; + /* number of different wqes sharing a page */ + info->wqe_bulk = 1 + (info->num_frags % 2); + +out: + info->wqe_bulk = max_t(u8, info->wqe_bulk, 8); + info->log_num_frags = order_base_2(info->num_frags); +} + +static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs) +{ + int sz = sizeof(struct mlx5_wqe_data_seg) * ndsegs; + + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + sz += sizeof(struct mlx5e_rx_wqe_ll); + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + sz += sizeof(struct mlx5e_rx_wqe_cyc); + } + + return order_base_2(sz); +} + static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_rq_param *param) @@ -1871,6 +2098,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + int ndsegs = 1; switch (params->rq_wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -1880,23 +2108,24 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wqe_stride_size, mlx5e_mpwqe_get_log_stride_size(mdev, params) - MLX5_MPWQE_LOG_STRIDE_SZ_BASE); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params)); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + default: /* MLX5_WQ_TYPE_CYCLIC */ MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); + mlx5e_build_rq_frags_info(mdev, params, ¶m->frags_info); + ndsegs = param->frags_info.num_frags; } + MLX5_SET(wq, wq, wq_type, params->rq_wq_type); MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(params->rq_wq_type, ndsegs)); MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable); MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); - param->wq.linear = 1; } static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, @@ -1906,8 +2135,9 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); - MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, log_wq_stride, + mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1)); MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter); param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev); @@ -1958,7 +2188,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, log_cq_size = 
mlx5e_mpwqe_get_log_rq_size(params) + mlx5e_mpwqe_get_log_num_strides(mdev, params); break; - default: /* MLX5_WQ_TYPE_LINKED_LIST */ + default: /* MLX5_WQ_TYPE_CYCLIC */ log_cq_size = params->log_rq_mtu_frames; } @@ -2084,13 +2314,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) int err = 0; int i; - for (i = 0; i < chs->num; i++) { - err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq); - if (err) - break; - } + for (i = 0; i < chs->num; i++) + err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, + err ? 0 : 20000); - return err; + return err ? -ETIMEDOUT : 0; } static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) @@ -2580,15 +2808,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) netdev_set_tc_queue(netdev, tc, nch, 0); } -static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) +static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv) { - struct mlx5e_channel *c; - struct mlx5e_txqsq *sq; + int max_nch = priv->profile->max_nch(priv->mdev); int i, tc; - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (tc = 0; tc < priv->profile->max_tc; tc++) - priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num; + priv->channel_tc2txq[i][tc] = i + tc * max_nch; +} + +static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv) +{ + struct mlx5e_channel *c; + struct mlx5e_txqsq *sq; + int i, tc; for (i = 0; i < priv->channels.num; i++) { c = priv->channels.c[i]; @@ -2608,7 +2842,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->channels.num); - mlx5e_build_channels_tx_maps(priv); + mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); @@ -2760,8 +2994,8 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, param->wq.db_numa_node = param->wq.buf_numa_node; - err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, - &rq->wq_ctrl); + err = mlx5_wq_cyc_create(mdev, ¶m->wq, rqc_wq, &rq->wqe.wq, + &rq->wq_ctrl); if (err) return err; @@ -3085,6 +3319,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev, if (err) goto out; + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + new_channels.params.num_tc); mlx5e_switch_priv_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); @@ -3093,22 +3329,23 @@ out: #ifdef CONFIG_MLX5_ESWITCH static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *cls_flower) + struct tc_cls_flower_offload *cls_flower, + int flags) { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower); + return mlx5e_configure_flower(priv, cls_flower, flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower); + return mlx5e_delete_flower(priv, cls_flower, flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower); + return mlx5e_stats_flower(priv, cls_flower, flags); default: return -EOPNOTSUPP; } } -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) +static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { struct mlx5e_priv *priv = cb_priv; @@ -3117,7 +3354,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data); + return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); 
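The tc2txq map built by mlx5e_build_tc2txq_maps() above strides by max_nch (the maximum channel count) rather than by the number of currently-open channels, so the mapping stays valid when the channel count changes at runtime. A sketch of the resulting layout (illustrative helper, not part of the patch):

	/* txqs are grouped by traffic class, each group max_nch wide:
	 * e.g. with max_nch = 8, channel 2/tc 0 -> txq 2, channel 2/tc 1 -> txq 10.
	 */
	static inline int tc_to_txq(int ch_ix, int tc, int max_nch)
	{
		return ch_ix + tc * max_nch;
	}
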
default: return -EOPNOTSUPP; } @@ -3174,6 +3411,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); } else { + mlx5e_grp_sw_update_stats(priv); stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -3248,12 +3486,18 @@ static int set_feature_lro(struct net_device *netdev, bool enable) mutex_lock(&priv->state_lock); old_params = &priv->channels.params; + if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + netdev_warn(netdev, "can't set LRO with legacy RQ\n"); + err = -EINVAL; + goto out; + } + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); new_channels.params = *old_params; new_channels.params.lro_en = enable; - if (old_params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params) == mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params)) reset = false; @@ -3417,22 +3661,31 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params *params; mutex_lock(&priv->state_lock); + params = &priv->channels.params; if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. */ features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (!priv->channels.params.vlan_strip_disable) + if (!params->vlan_strip_disable) netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); } + if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + features &= ~NETIF_F_LRO; + if (params->lro_en) + netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); + } + mutex_unlock(&priv->state_lock); return features; } -static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) +int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, + change_hw_mtu_cb set_mtu_cb) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; @@ -3450,7 +3703,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; - if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) { + if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); @@ -3459,7 +3712,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (!reset) { params->sw_mtu = new_mtu; - mlx5e_set_dev_port_mtu(priv); + set_mtu_cb(priv); netdev->mtu = params->sw_mtu; goto out; } @@ -3468,7 +3721,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu); + mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); netdev->mtu = new_channels.params.sw_mtu; out: @@ -3476,6 +3729,11 @@ out: return err; } +static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); +} + int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) { struct hwtstamp_config config; @@ -3770,7 +4028,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, return false; netdev_err(dev, 
"Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn); - sq->channel->stats.eq_rearm++; + sq->channel->stats->eq_rearm++; return true; } @@ -3970,7 +4228,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, - .ndo_change_mtu = mlx5e_change_mtu, + .ndo_change_mtu = mlx5e_change_nic_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, @@ -4038,7 +4296,7 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) u32 link_speed = 0; u32 pci_bw = 0; - mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_port_max_linkspeed(mdev, &link_speed); pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL); mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n", link_speed, pci_bw); @@ -4049,18 +4307,48 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev) link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw; } -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +static struct net_dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) { - params->tx_cq_moderation.cq_period_mode = cq_period_mode; + struct net_dim_cq_moder moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; - params->tx_cq_moderation.pkts = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - params->tx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + return moder; +} + +static struct net_dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) +{ + struct net_dim_cq_moder moder; + moder.cq_period_mode = cq_period_mode; + moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - params->tx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) +{ + return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
+ NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE : + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) { + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); + } else { + params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); + } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, params->tx_cq_moderation.cq_period_mode == @@ -4069,28 +4357,12 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) { - params->rx_cq_moderation.cq_period_mode = cq_period_mode; - - params->rx_cq_moderation.pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - params->rx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - params->rx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - if (params->rx_dim_enabled) { - switch (cq_period_mode) { - case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: - params->rx_cq_moderation = - net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); - break; - case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: - default: - params->rx_cq_moderation = - net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); - } + u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); + + params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); + } else { + params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, @@ -4135,9 +4407,16 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def); /* RQ */ - if (mlx5e_striding_rq_possible(mdev, params)) - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, - !slow_pci_heuristic(mdev)); + /* Prefer Striding RQ, unless any of the following holds: + * - Striding RQ configuration is not possible/supported. + * - Slow PCI heuristic. + * - Legacy RQ would use linear SKB while Striding RQ would use non-linear. 
+ */ + if (!slow_pci_heuristic(mdev) && + mlx5e_striding_rq_possible(mdev, params) && + (mlx5e_rx_mpwqe_is_linear_skb(mdev, params) || + !mlx5e_rx_is_linear_skb(mdev, params))) + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true); mlx5e_set_rq_type(mdev, params); mlx5e_init_rq_type_params(mdev, params); @@ -4154,6 +4433,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); @@ -4179,6 +4459,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->profile = profile; priv->ppriv = ppriv; priv->msglevel = MLX5E_MSG_LEVEL; + priv->max_opened_tc = 1; mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev), netdev->mtu); @@ -4243,7 +4524,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; - if (!!MLX5_CAP_ETH(mdev, lro_cap)) + if (!!MLX5_CAP_ETH(mdev, lro_cap) && + mlx5e_check_fragmented_striding_rq_cap(mdev)) netdev->vlan_features |= NETIF_F_LRO; netdev->hw_features = netdev->vlan_features; @@ -4320,6 +4602,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) #endif mlx5e_ipsec_build_netdev(priv); + mlx5e_tls_build_netdev(priv); } static void mlx5e_create_q_counters(struct mlx5e_priv *priv) @@ -4361,12 +4644,17 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev, err = mlx5e_ipsec_init(priv); if (err) mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err); + err = mlx5e_tls_init(priv); + if (err) + mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); + mlx5e_build_tc2txq_maps(priv); mlx5e_vxlan_init(priv); } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { + mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_vxlan_cleanup(priv); } @@ -4398,7 +4686,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) goto err_destroy_direct_tirs; } - err = mlx5e_tc_init(priv); + err = mlx5e_tc_nic_init(priv); if (err) goto err_destroy_flow_steering; @@ -4419,7 +4707,7 @@ err_destroy_indirect_rqts: static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { - mlx5e_tc_cleanup(priv); + mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 876c3e4c6193..57987f6546e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -66,18 +66,36 @@ static const struct counter_desc sw_rep_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, }; -#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) +struct vport_stats { + u64 vport_rx_packets; + u64 vport_tx_packets; + u64 vport_rx_bytes; + u64 vport_tx_bytes; +}; + +static const struct counter_desc vport_rep_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) }, + { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) }, +}; + +#define NUM_VPORT_REP_SW_COUNTERS 
ARRAY_SIZE(sw_rep_stats_desc) +#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc) static void mlx5e_rep_get_strings(struct net_device *dev, u32 stringset, uint8_t *data) { - int i; + int i, j; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) strcpy(data + (i * ETH_GSTRING_LEN), sw_rep_stats_desc[i].format); + for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) + strcpy(data + (i * ETH_GSTRING_LEN), + vport_rep_stats_desc[j].format); break; } } @@ -116,13 +134,13 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - rq_stats = &c->rq.stats; + rq_stats = c->rq.stats; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + sq_stats = c->sq[j].stats; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -130,17 +148,11 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) } } -static void mlx5e_rep_update_stats(struct mlx5e_priv *priv) -{ - mlx5e_rep_update_sw_counters(priv); - mlx5e_rep_update_hw_counters(priv); -} - static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); - int i; + int i, j; if (!data) return; @@ -148,18 +160,23 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_rep_update_sw_counters(priv); + mlx5e_rep_update_hw_counters(priv); mutex_unlock(&priv->state_lock); - for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++) data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_rep_stats_desc, i); + + for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++) + data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport, + vport_rep_stats_desc, j); } static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_REP_COUNTERS; + return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS; default: return -EOPNOTSUPP; } @@ -681,8 +698,8 @@ static int mlx5e_rep_open(struct net_device *dev) goto unlock; if (!mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -699,8 +716,8 @@ static int mlx5e_rep_close(struct net_device *dev) mutex_lock(&priv->state_lock); mlx5_modify_vport_admin_state(priv->mdev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@ -723,15 +740,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, static int mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *cls_flower) + struct tc_cls_flower_offload *cls_flower, int flags) { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower); + return mlx5e_configure_flower(priv, cls_flower, flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower); + return 
mlx5e_delete_flower(priv, cls_flower, flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower); + return mlx5e_stats_flower(priv, cls_flower, flags); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct mlx5e_priv *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS); default: return -EOPNOTSUPP; } @@ -747,7 +780,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data); + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); default: return -EOPNOTSUPP; } @@ -832,6 +865,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_sw_stats *sstats = &priv->stats.sw; + mlx5e_rep_update_sw_counters(priv); + stats->rx_packets = sstats->rx_packets; stats->rx_bytes = sstats->rx_bytes; stats->tx_packets = sstats->tx_packets; @@ -865,6 +900,11 @@ static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, NULL); +} + static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_open = mlx5e_rep_open, .ndo_stop = mlx5e_rep_close, @@ -874,6 +914,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, + .ndo_change_mtu = mlx5e_change_rep_mtu, }; static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, @@ -886,7 +927,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->hard_mtu = MLX5E_ETH_HARD_MTU; params->sw_mtu = mtu; params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; - params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC; params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE; params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); @@ -900,6 +941,10 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, static void mlx5e_build_rep_netdev(struct net_device *netdev) { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; + netdev->netdev_ops = &mlx5e_netdev_ops_rep; netdev->watchdog_timeo = 15 * HZ; @@ -912,6 +957,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_TC; eth_hw_addr_random(netdev); + + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); } static void mlx5e_init_rep(struct mlx5_core_dev *mdev, @@ -965,14 +1014,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) } rpriv->vport_rx_rule = flow_rule; - err = mlx5e_tc_init(priv); - if (err) - goto err_del_flow_rule; - return 0; -err_del_flow_rule: - mlx5_del_flow_rules(rpriv->vport_rx_rule); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: @@ -984,7 +1027,6 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_tc_cleanup(priv); mlx5_del_flow_rules(rpriv->vport_rx_rule); 
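The representor's max_mtu above is derived by translating the firmware-reported port limit back into a software MTU. A sketch of the SW<->HW MTU convention this relies on, assuming hard_mtu covers the L2 overhead (Ethernet header, VLAN tag, FCS) that the device counts against the port limit but the stack's MTU does not; the macro bodies here are illustrative, not the driver's:

	#define EXAMPLE_SW2HW_MTU(hard_mtu, swmtu)	((swmtu) + (hard_mtu))
	#define EXAMPLE_HW2SW_MTU(hard_mtu, hwmtu)	((hwmtu) - (hard_mtu))

	/* e.g. hard_mtu = 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 22:
	 * a 9978-byte port limit would yield a 9956-byte netdev MTU ceiling.
	 */
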
mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_rqts(priv); @@ -1014,7 +1056,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .cleanup_rx = mlx5e_cleanup_rep_rx, .init_tx = mlx5e_init_rep_tx, .cleanup_tx = mlx5e_cleanup_nic_tx, - .update_stats = mlx5e_rep_update_stats, + .update_stats = mlx5e_rep_update_hw_counters, .max_nch = mlx5e_get_rep_max_num_channels, .update_carrier = NULL, .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, @@ -1042,8 +1084,15 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) if (err) goto err_remove_sqs; + /* init shared tc flow table */ + err = mlx5e_tc_esw_init(&rpriv->tc_ht); + if (err) + goto err_neigh_cleanup; + return 0; +err_neigh_cleanup: + mlx5e_rep_neigh_cleanup(rpriv); err_remove_sqs: mlx5e_remove_sqs_fwd_rules(priv); return err; @@ -1058,9 +1107,8 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep) if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_remove_sqs_fwd_rules(priv); - /* clean (and re-init) existing uplink offloaded TC rules */ - mlx5e_tc_cleanup(priv); - mlx5e_tc_init(priv); + /* clean uplink offloaded TC rules, delete shared tc flow table */ + mlx5e_tc_esw_cleanup(&rpriv->tc_ht); mlx5e_rep_neigh_cleanup(rpriv); } @@ -1107,7 +1155,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH); upriv = netdev_priv(uplink_rpriv->netdev); - err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb, + err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); if (err) goto err_neigh_cleanup; @@ -1122,7 +1170,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) return 0; err_egdev_cleanup: - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); err_neigh_cleanup: @@ -1151,7 +1199,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); upriv = netdev_priv(uplink_rpriv->netdev); - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, upriv); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index b9b481f2833a..844d32d5c29f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -59,6 +59,7 @@ struct mlx5e_rep_priv { struct net_device *netdev; struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; + struct rhashtable tc_ht; /* valid for uplink rep */ }; static inline diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1ff0b0e93804..d3a1dd20e41d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include <linux/bpf_trace.h> #include <net/busy_poll.h> #include <net/ip6_checksum.h> +#include <net/page_pool.h> #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -53,7 +54,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, void *data) { - u32 ci = cqcc & cq->wq.fbc.sz_m1; + u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc); memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct 
mlx5_cqe64)); } @@ -64,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, mlx5e_read_cqe_slot(cq, cqcc, &cq->title); cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); - rq->stats.cqe_compress_blks++; + rq->stats->cqe_compress_blks++; } static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) @@ -75,10 +76,11 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) { - struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc; - u8 op_own = (cqcc >> fbc->log_sz) & 1; - u32 wq_sz = 1 << fbc->log_sz; - u32 ci = cqcc & fbc->sz_m1; + struct mlx5_cqwq *wq = &cq->wq; + + u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; + u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); + u32 wq_sz = mlx5_cqwq_get_size(wq); u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { @@ -111,7 +113,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, mpwrq_get_cqe_consumed_strides(&cq->title); else cq->decmprs_wqe_counter = - (cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1; + mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, @@ -144,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); cq->wq.cc = cqcc; cq->decmprs_left -= cqe_count; - rq->stats.cqe_compress_pkts += cqe_count; + rq->stats->cqe_compress_pkts += cqe_count; return cqe_count; } @@ -162,8 +164,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; } -#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT) - static inline bool mlx5e_page_is_reserved(struct page *page) { return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); @@ -174,14 +174,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, { struct mlx5e_page_cache *cache = &rq->page_cache; u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1); + struct mlx5e_rq_stats *stats = rq->stats; if (tail_next == cache->head) { - rq->stats.cache_full++; + stats->cache_full++; return false; } if (unlikely(mlx5e_page_is_reserved(dma_info->page))) { - rq->stats.cache_waive++; + stats->cache_waive++; return false; } @@ -194,23 +195,24 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) { struct mlx5e_page_cache *cache = &rq->page_cache; + struct mlx5e_rq_stats *stats = rq->stats; if (unlikely(cache->head == cache->tail)) { - rq->stats.cache_empty++; + stats->cache_empty++; return false; } if (page_ref_count(cache->page_cache[cache->head].page) != 1) { - rq->stats.cache_busy++; + stats->cache_busy++; return false; } *dma_info = cache->page_cache[cache->head]; cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); - rq->stats.cache_reuse++; + stats->cache_reuse++; dma_sync_single_for_device(rq->pdev, dma_info->addr, - RQ_PAGE_SIZE(rq), + PAGE_SIZE, DMA_FROM_DEVICE); return true; } @@ -221,12 +223,12 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, if (mlx5e_rx_cache_get(rq, dma_info)) return 0; - dma_info->page = dev_alloc_pages(rq->buff.page_order); + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); if (unlikely(!dma_info->page)) return -ENOMEM; dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, - RQ_PAGE_SIZE(rq), rq->buff.map_dir); + PAGE_SIZE, 
rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { put_page(dma_info->page); dma_info->page = NULL; @@ -236,73 +238,124 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, return 0; } +static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, + struct mlx5e_dma_info *dma_info) +{ + dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); +} + void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle) { - if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info)) - return; + if (likely(recycle)) { + if (mlx5e_rx_cache_put(rq, dma_info)) + return; - dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq), - rq->buff.map_dir); - put_page(dma_info->page); + mlx5e_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); + } else { + mlx5e_page_dma_unmap(rq, dma_info); + put_page(dma_info->page); + } } -static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) { - return rq->wqe.page_reuse && wi->di.page && - (wi->offset + rq->wqe.frag_sz <= RQ_PAGE_SIZE(rq)) && - !mlx5e_page_is_reserved(wi->di.page); + int err = 0; + + if (!frag->offset) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. + */ + err = mlx5e_page_alloc_mapped(rq, frag->di); + + return err; } -static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, + struct mlx5e_wqe_frag_info *frag) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + if (frag->last_in_page) + mlx5e_page_release(rq, frag->di, true); +} - /* check if page exists, hence can be reused */ - if (!wi->di.page) { - if (unlikely(mlx5e_page_alloc_mapped(rq, &wi->di))) - return -ENOMEM; - wi->offset = 0; +static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe, + u16 ix) +{ + struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix); + int err; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = mlx5e_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; + + wqe->data[i].addr = cpu_to_be64(frag->di->addr + + frag->offset + rq->buff.headroom); } - wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom); return 0; + +free_frags: + while (--i >= 0) + mlx5e_put_rx_frag(rq, --frag); + + return err; } static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi) { - mlx5e_page_release(rq, &wi->di, true); - wi->di.page = NULL; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + mlx5e_put_rx_frag(rq, wi); } -static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq, - struct mlx5e_wqe_frag_info *wi) +void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { - if (mlx5e_page_reuse(rq, wi)) { - rq->stats.page_reuse++; - return; - } + struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); mlx5e_free_rx_wqe(rq, wi); } -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) +static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) { - struct mlx5e_wqe_frag_info *wi = &rq->wqe.frag_info[ix]; + struct mlx5_wq_cyc 
*wq = &rq->wqe.wq; + int err; + int i; - if (wi->di.page) - mlx5e_free_rx_wqe(rq, wi); + for (i = 0; i < wqe_bulk; i++) { + struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); + + err = mlx5e_alloc_rx_wqe(rq, wqe, ix + i); + if (unlikely(err)) + goto free_wqes; + } + + return 0; + +free_wqes: + while (--i >= 0) + mlx5e_dealloc_rx_wqe(rq, ix + i); + + return err; } -static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, - struct sk_buff *skb, - struct mlx5e_dma_info *di, - u32 frag_offset, u32 len) +static inline void +mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5e_dma_info *di, u32 frag_offset, u32 len, + unsigned int truesize) { - unsigned int truesize = ALIGN(len, BIT(rq->mpwqe.log_stride_sz)); - dma_sync_single_for_cpu(rq->pdev, di->addr + frag_offset, len, DMA_FROM_DEVICE); @@ -312,29 +365,33 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq, } static inline void +mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, + struct mlx5e_dma_info *dma_info, + int offset_from, int offset_to, u32 headlen) +{ + const void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, offset_to, from, len); +} + +static inline void mlx5e_copy_skb_header_mpwqe(struct device *pdev, struct sk_buff *skb, struct mlx5e_dma_info *dma_info, u32 offset, u32 headlen) { u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - unsigned int len; - /* Aligning len to sizeof(long) optimizes memcpy performance */ - len = ALIGN(headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(dma_info->page) + offset, len); + mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg); if (unlikely(offset + headlen > PAGE_SIZE)) { dma_info++; - headlen_pg = len; - len = ALIGN(headlen - headlen_pg, sizeof(long)); - dma_sync_single_for_cpu(pdev, dma_info->addr, len, - DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, headlen_pg, - page_address(dma_info->page), - len); + mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg, + headlen - headlen_pg); } } @@ -352,8 +409,8 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; + struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); rq->mpwqe.umr_in_progress = false; @@ -370,6 +427,22 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; } +static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) +{ + struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; + + edge_wi = wi + nnops; + + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->opcode = MLX5_OPCODE_NOP; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } +} + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@ -378,14 +451,16 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) struct 
mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_umr_wqe *umr_wqe; u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); + u16 pi, frag_pi; int err; - u16 pi; int i; - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + + if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi); + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); } umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -421,7 +496,7 @@ err_unmap: dma_info--; mlx5e_page_release(rq, dma_info, true); } - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return err; } @@ -435,31 +510,34 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk; int err; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; - if (mlx5_wq_ll_is_full(wq)) + wqe_bulk = rq->wqe.info.wqe_bulk; + + if (mlx5_wq_cyc_missing(wq) < wqe_bulk) return false; do { - struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + u16 head = mlx5_wq_cyc_get_head(wq); - err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head); + err = mlx5e_alloc_rx_wqes(rq, head, wqe_bulk); if (unlikely(err)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; break; } - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); - } while (!mlx5_wq_ll_is_full(wq)); + mlx5_wq_cyc_push_n(wq, wqe_bulk); + } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); /* ensure wqes are visible to device before updating doorbell record */ dma_wmb(); - mlx5_wq_ll_update_db_record(wq); + mlx5_wq_cyc_update_db_record(wq); return !!err; } @@ -470,7 +548,7 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; mlx5_cqwq_pop(&cq->wq); @@ -496,7 +574,7 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); struct mlx5_cqe64 *cqe; - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return; cqe = mlx5_cqwq_get_cqe(&cq->wq); @@ -511,9 +589,9 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { - struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5_wq_ll *wq = &rq->mpwqe.wq; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return false; mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq); @@ -660,6 +738,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, struct sk_buff *skb, bool lro) { + struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) @@ -667,7 +746,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } @@ -685,7 +764,7 @@ static inline void 
mlx5e_handle_csum(struct net_device *netdev, if (unlikely(netdev->features & NETIF_F_RXFCS)) skb->csum = csum_add(skb->csum, (__force __wsum)mlx5e_get_fcs(skb)); - rq->stats.csum_complete++; + stats->csum_complete++; return; } @@ -695,15 +774,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (cqe_is_tunneled(cqe)) { skb->csum_level = 1; skb->encapsulation = 1; - rq->stats.csum_unnecessary_inner++; + stats->csum_unnecessary_inner++; return; } - rq->stats.csum_unnecessary++; + stats->csum_unnecessary++; return; } csum_none: skb->ip_summed = CHECKSUM_NONE; - rq->stats.csum_none++; + stats->csum_none++; } static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, @@ -711,20 +790,20 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, struct sk_buff *skb) { + u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; + struct mlx5e_rq_stats *stats = rq->stats; struct net_device *netdev = rq->netdev; - int lro_num_seg; skb->mac_len = ETH_HLEN; - lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); /* Subtract one since we already counted this as one * "regular" packet in mlx5e_complete_rx_cqe() */ - rq->stats.packets += lro_num_seg - 1; - rq->stats.lro_packets++; - rq->stats.lro_bytes += cqe_bcnt; + stats->packets += lro_num_seg - 1; + stats->lro_packets++; + stats->lro_bytes += cqe_bcnt; } if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) @@ -739,7 +818,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (cqe_has_vlan(cqe)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); - rq->stats.removed_vlan_packets++; + stats->removed_vlan_packets++; } skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; @@ -753,8 +832,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); } @@ -762,7 +843,7 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; struct mlx5e_tx_wqe *wqe; - u16 pi = (sq->pc - 1) & wq->sz_m1; /* last pi */ + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ wqe = mlx5_wq_cyc_get_wqe(wq, pi); @@ -775,7 +856,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, { struct mlx5e_xdpsq *sq = &rq->xdpsq; struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; @@ -786,10 +867,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, dma_addr_t dma_addr = di->addr + data_offset; unsigned int dma_len = xdp->data_end - xdp->data; + struct mlx5e_rq_stats *stats = rq->stats; + prefetchw(wqe); if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { - rq->stats.xdp_drop++; + stats->xdp_drop++; return false; } @@ -799,7 +882,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, mlx5e_xmit_xdp_doorbell(sq); sq->db.doorbell = false; } - rq->stats.xdp_tx_full++; + stats->xdp_tx_full++; return false; } @@ -833,18 +916,19 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, sq->db.doorbell = true; - rq->stats.xdp_tx++; + stats->xdp_tx++; return true; } /* returns true if packet was 
consumed by xdp */ -static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, - struct mlx5e_dma_info *di, - void *va, u16 *rx_headroom, u32 *len) +static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, + struct mlx5e_dma_info *di, + void *va, u16 *rx_headroom, u32 *len) { - const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); struct xdp_buff xdp; u32 act; + int err; if (!prog) return false; @@ -865,12 +949,21 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) trace_xdp_exception(rq->netdev, prog, act); return true; + case XDP_REDIRECT: + /* When XDP enabled then page-refcnt==1 here */ + err = xdp_do_redirect(rq->netdev, &xdp, prog); + if (!err) { + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); + rq->xdpsq.db.redirect_flush = true; + mlx5e_page_dma_unmap(rq, di); + } + return true; default: bpf_warn_invalid_xdp_action(act); case XDP_ABORTED: trace_xdp_exception(rq->netdev, prog, act); case XDP_DROP: - rq->stats.xdp_drop++; + rq->stats->xdp_drop++; return true; } } @@ -883,7 +976,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, struct sk_buff *skb = build_skb(va, frag_size); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -893,11 +986,11 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, return skb; } -static inline -struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +struct sk_buff * +mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { - struct mlx5e_dma_info *di = &wi->di; + struct mlx5e_dma_info *di = wi->di; u16 rx_headroom = rq->buff.headroom; struct sk_buff *skb; void *va, *data; @@ -910,11 +1003,11 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ prefetch(data); - wi->offset += frag_size; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; return NULL; } @@ -934,41 +1027,87 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return skb; } +struct sk_buff * +mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) +{ + struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct mlx5e_wqe_frag_info *head_wi = wi; + u16 headlen = min_t(u32, MLX5E_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats->wqe_err++; + return NULL; + } + + /* XDP is not supported in this configuration, as incoming packets + * might spread among multiple pages. 
+ */ + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + while (byte_cnt) { + u16 frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + mlx5e_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + frag_headlen = 0; + frag_info++; + wi++; + } + + /* copy header */ + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, + 0, headlen); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + return skb; +} + void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + /* do not return page to cache, + * it will be returned on XDP_TX completion. + */ + goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #ifdef CONFIG_MLX5_ESWITCH @@ -978,29 +1117,26 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; struct sk_buff *skb; - __be16 wqe_counter_be; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) { + /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { - wi->di.page = NULL; - /* do not return page to cache, it will be returned on XDP_TX completion */ - goto wq_ll_pop; + /* do not return page to cache, + * it will be returned on XDP_TX completion. 
+ */ + goto wq_cyc_pop; } - /* probably an XDP_DROP, save the page-reuse checks */ - mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto free_wqe; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); @@ -1010,10 +1146,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); +free_wqe: + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif @@ -1021,7 +1157,7 @@ struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) { - u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt); struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; u32 frag_offset = head_offset + headlen; u32 byte_cnt = cqe_bcnt - headlen; @@ -1029,9 +1165,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct sk_buff *skb; skb = napi_alloc_skb(rq->cq.napi, - ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long))); + ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long))); if (unlikely(!skb)) { - rq->stats.buff_alloc_err++; + rq->stats->buff_alloc_err++; return NULL; } @@ -1045,9 +1181,11 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w while (byte_cnt) { u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + unsigned int truesize = + ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - mlx5e_add_skb_frag_mpwqe(rq, skb, di, frag_offset, - pg_consumed_bytes); + mlx5e_add_skb_frag(rq, skb, di, frag_offset, + pg_consumed_bytes, truesize); byte_cnt -= pg_consumed_bytes; frag_offset = 0; di++; @@ -1110,19 +1248,20 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; u32 head_offset = wqe_offset & (PAGE_SIZE - 1); u32 page_idx = wqe_offset >> PAGE_SHIFT; - struct mlx5e_rx_wqe *wqe; + struct mlx5e_rx_wqe_ll *wqe; + struct mlx5_wq_ll *wq; struct sk_buff *skb; u16 cqe_bcnt; wi->consumed_strides += cstrides; if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; + rq->stats->wqe_err++; goto mpwrq_cqe_out; } if (unlikely(mpwrq_is_filler_cqe(cqe))) { - rq->stats.mpwqe_filler++; + rq->stats->mpwqe_filler++; goto mpwrq_cqe_out; } @@ -1140,9 +1279,10 @@ mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5e_free_rx_mpwqe(rq, wi); - mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) @@ -1152,7 +1292,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5_cqe64 *cqe; int work_done = 0; - if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; if (cq->decmprs_left) @@ -1182,6 +1322,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) xdpsq->db.doorbell = false; } + if (xdpsq->db.redirect_flush) { + xdp_do_flush_map(); + xdpsq->db.redirect_flush = false; + } + mlx5_cqwq_update_db_record(&cq->wq); /* ensure cq space is freed before enabling more cqes */ @@ -1200,7 +1345,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) sq = container_of(cq, 
struct mlx5e_xdpsq, cq); - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; cqe = mlx5_cqwq_get_cqe(&cq->wq); @@ -1229,7 +1374,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); di = &sq->db.di[ci]; sqcc++; @@ -1254,7 +1399,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) u16 ci; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); di = &sq->db.di[ci]; sq->cc++; @@ -1272,6 +1417,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { + struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; struct net_device *netdev; struct mlx5e_priv *priv; @@ -1333,27 +1479,24 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->dev = netdev; - rq->stats.csum_complete++; - rq->stats.packets++; - rq->stats.bytes += cqe_bcnt; + stats->csum_complete++; + stats->packets++; + stats->bytes += cqe_bcnt; } void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (!skb) goto wq_free_wqe; @@ -1365,9 +1508,8 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: - mlx5e_free_rx_wqe_reuse(rq, wi); - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5e_free_rx_wqe(rq, wi); + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_CORE_IPOIB */ @@ -1376,38 +1518,34 @@ wq_free_wqe: void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { + struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_counter_be; struct sk_buff *skb; - u16 wqe_counter; u32 cqe_bcnt; + u16 ci; - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - wi = &rq->wqe.frag_info[wqe_counter]; - cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); + wi = get_frag(rq, ci); + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); - skb = skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); if (unlikely(!skb)) { /* a DROP, save the page-reuse checks */ mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb); if (unlikely(!skb)) { mlx5e_free_rx_wqe(rq, wi); - goto wq_ll_pop; + goto wq_cyc_pop; } mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); napi_gro_receive(rq->cq.napi, skb); - mlx5e_free_rx_wqe_reuse(rq, wi); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + mlx5e_free_rx_wqe(rq, wi); +wq_cyc_pop: + mlx5_wq_cyc_pop(wq); } #endif /* CONFIG_MLX5_EN_IPSEC */ diff --git 
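
The new XDP_REDIRECT case in the en_rx.c hunks above queues each frame with xdp_do_redirect() and only sets the per-queue redirect_flush flag; mlx5e_poll_rx_cq() then issues a single xdp_do_flush_map() at the end of the NAPI poll. Below is a minimal, self-contained C sketch of that defer-and-flush pattern; rx_poll(), redirect_one() and flush_maps() are invented stand-ins for illustration, not mlx5e functions.

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for xdp_do_redirect() and xdp_do_flush_map(). */
static int redirect_one(int pkt) { printf("queued pkt %d\n", pkt); return 0; }
static void flush_maps(void) { printf("flush: one doorbell for the whole batch\n"); }

/* Per-queue flag mirroring xdpsq->db.redirect_flush in the patch. */
struct rxq { bool redirect_flush; };

static void rx_poll(struct rxq *q, int budget)
{
	int pkt;

	for (pkt = 0; pkt < budget; pkt++) {
		if (!redirect_one(pkt))
			q->redirect_flush = true;	/* defer the flush */
	}
	if (q->redirect_flush) {	/* once per poll, not per packet */
		flush_maps();
		q->redirect_flush = false;
	}
}

int main(void)
{
	struct rxq q = { .redirect_flush = false };

	rx_poll(&q, 4);
	return 0;
}

Batching the flush this way amortizes the redirect doorbell over the whole poll budget instead of paying it per packet.
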
a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 027f54ac1ca2..4d316cc9b008 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -100,7 +100,7 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv) #ifdef CONFIG_INET /* loopback test */ -#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN) +#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; #define MLX5E_TEST_MAGIC 0x5AEED15C001ULL diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b08c94422907..1646859974ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -32,6 +32,7 @@ #include "en.h" #include "en_accel/ipsec.h" +#include "en_accel/tls.h" static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, @@ -43,6 +44,12 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, + +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) }, +#endif + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, @@ -57,11 +64,11 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, @@ -74,7 +81,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_eq_rearm) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) @@ -102,20 +108,19 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } -static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) { struct mlx5e_sw_stats temp, *s = &temp; - struct mlx5e_rq_stats *rq_stats; - struct mlx5e_sq_stats *sq_stats; - struct mlx5e_ch_stats *ch_stats; - int i, j; + int i; memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_channel *c = 
priv->channels.c[i]; - rq_stats = &c->rq.stats; - ch_stats = &c->stats; + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats = + &priv->channel_stats[i]; + struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; + struct mlx5e_ch_stats *ch_stats = &channel_stats->ch; + int j; s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; @@ -142,8 +147,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_cache_waive += rq_stats->cache_waive; s->ch_eq_rearm += ch_stats->eq_rearm; - for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = &c->sq[j].stats; + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; @@ -161,12 +166,13 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_csum_partial_inner += sq_stats->csum_partial_inner; s->tx_csum_none += sq_stats->csum_none; s->tx_csum_partial += sq_stats->csum_partial; +#ifdef CONFIG_MLX5_EN_TLS + s->tx_tls_ooo += sq_stats->tls_ooo; + s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes; +#endif } } - s->link_down_events_phy = MLX5_GET(ppcnt_reg, - priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events); memcpy(&priv->stats.sw, s, sizeof(*s)); } @@ -569,12 +575,13 @@ static const struct counter_desc pport_phy_statistical_stats_desc[] = { { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, }; -#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) +#define NUM_PPORT_PHY_STATISTICAL_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) { + /* "1" for link_down_events special counter */ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? 
- NUM_PPORT_PHY_COUNTERS : 0; + NUM_PPORT_PHY_STATISTICAL_COUNTERS + 1 : 1; } static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, @@ -582,10 +589,14 @@ static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_stats_desc[i].format); + strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy"); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); return idx; } @@ -593,11 +604,17 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { int i; - if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) - for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) - data[idx++] = - MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_stats_desc, i); + /* link_down_events_phy has special handling since it is not stored in __be64 format */ + data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); + + if (!MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + return idx; + + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); return idx; } @@ -1065,6 +1082,22 @@ static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv) mlx5e_ipsec_update_stats(priv); } +static int mlx5e_grp_tls_get_num_stats(struct mlx5e_priv *priv) +{ + return mlx5e_tls_get_count(priv); +} + +static int mlx5e_grp_tls_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + return idx + mlx5e_tls_get_strings(priv, data + idx * ETH_GSTRING_LEN); +} + +static int mlx5e_grp_tls_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +{ + return idx + mlx5e_tls_get_stats(priv, data + idx); +} + static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, @@ -1104,11 +1137,11 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, recover) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, }; static const struct counter_desc ch_stats_desc[] = { @@ -1121,30 +1154,30 @@ static const struct counter_desc ch_stats_desc[] = { static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) { - return (NUM_RQ_STATS * priv->channels.num) + - (NUM_CH_STATS * priv->channels.num) + - (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc); + int max_nch = priv->profile->max_nch(priv->mdev); + + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * priv->max_opened_tc); } static int 
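
For orientation in the en_stats.c changes: every entry of mlx5e_stats_grps pairs three callbacks, where get_num_stats() sizes the buffers, fill_strings() emits the counter names, and fill_stats() emits the values, and the two fillers must walk the counters in exactly the same order. The following self-contained sketch shows that contract; struct stats_grp and the demo_* callbacks are illustrative names under that assumption, not the driver's own types.

#include <stdio.h>
#include <string.h>

#define STR_LEN 32	/* stands in for ETH_GSTRING_LEN */

struct stats_grp {
	int (*get_num)(void);
	int (*fill_strings)(char *data, int idx);
	int (*fill_stats)(unsigned long long *data, int idx);
};

static int demo_get_num(void) { return 2; }

static int demo_fill_strings(char *data, int idx)
{
	/* the order here must match demo_fill_stats() exactly */
	strcpy(data + (idx++) * STR_LEN, "rx_packets");
	strcpy(data + (idx++) * STR_LEN, "rx_bytes");
	return idx;
}

static int demo_fill_stats(unsigned long long *data, int idx)
{
	data[idx++] = 100;	/* rx_packets */
	data[idx++] = 6400;	/* rx_bytes */
	return idx;
}

int main(void)
{
	struct stats_grp grp = { demo_get_num, demo_fill_strings, demo_fill_stats };
	char names[2][STR_LEN];
	unsigned long long vals[2];
	int i, n = grp.get_num();

	grp.fill_strings(&names[0][0], 0);
	grp.fill_stats(vals, 0);
	for (i = 0; i < n; i++)
		printf("%-12s %llu\n", names[i], vals[i]);
	return 0;
}

Returning the running index from each filler, as the driver does, is what lets independent groups be concatenated into one ethtool dump without overlap.
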
mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) { + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, ch_stats_desc[j].format, i); - for (i = 0; i < priv->channels.num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < priv->channels.num; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, sq_stats_desc[j].format, @@ -1156,29 +1189,26 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { - struct mlx5e_channels *channels = &priv->channels; + int max_nch = priv->profile->max_nch(priv->mdev); int i, j, tc; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return idx; - - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_CH_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, ch_stats_desc, j); - for (i = 0; i < channels->num; i++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_RQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, rq_stats_desc, j); - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < channels->num; i++) + for (tc = 0; tc < priv->max_opened_tc; tc++) + for (i = 0; i < max_nch; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], sq_stats_desc, j); return idx; @@ -1190,7 +1220,6 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_sw_get_num_stats, .fill_strings = mlx5e_grp_sw_fill_strings, .fill_stats = mlx5e_grp_sw_fill_stats, - .update_stats_mask = MLX5E_NDO_UPDATE_STATS, .update_stats = mlx5e_grp_sw_update_stats, }, { @@ -1268,6 +1297,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .update_stats = mlx5e_grp_ipsec_update_stats, }, { + .get_num_stats = mlx5e_grp_tls_get_num_stats, + .fill_strings = mlx5e_grp_tls_fill_strings, + .fill_stats = mlx5e_grp_tls_fill_stats, + }, + { .get_num_stats = mlx5e_grp_channels_get_num_stats, .fill_strings = mlx5e_grp_channels_fill_strings, .fill_stats = mlx5e_grp_channels_fill_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53111a2df587..643153bb3607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -75,11 +75,11 @@ struct mlx5e_sw_stats { u64 tx_csum_partial; u64 tx_csum_partial_inner; u64 tx_queue_stopped; - u64 tx_queue_wake; u64 tx_queue_dropped; u64 tx_xmit_more; - u64 tx_cqe_err; u64 tx_recover; + u64 tx_queue_wake; + u64 tx_cqe_err; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_buff_alloc_err; @@ -93,8 +93,10 @@ struct mlx5e_sw_stats { u64 rx_cache_waive; u64 ch_eq_rearm; - /* Special handling counters */ - u64 link_down_events_phy; +#ifdef CONFIG_MLX5_EN_TLS + u64 tx_tls_ooo; + u64 
tx_tls_resync_bytes; +#endif }; struct mlx5e_qcounter_stats { @@ -194,13 +196,18 @@ struct mlx5e_sq_stats { u64 csum_partial_inner; u64 added_vlan_packets; u64 nop; +#ifdef CONFIG_MLX5_EN_TLS + u64 tls_ooo; + u64 tls_resync_bytes; +#endif /* less likely accessed in data path */ u64 csum_none; u64 stopped; - u64 wake; u64 dropped; - u64 cqe_err; u64 recover; + /* dirtied @completion */ + u64 wake ____cacheline_aligned_in_smp; + u64 cqe_err; }; struct mlx5e_ch_stats { @@ -233,4 +240,6 @@ struct mlx5e_stats_grp { extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; +void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv); + #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b94276db3ce9..0edf4751a8ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -52,28 +52,37 @@ #include "eswitch.h" #include "vxlan.h" #include "fs_core.h" +#include "en/port.h" struct mlx5_nic_flow_attr { u32 action; u32 flow_tag; u32 mod_hdr_id; u32 hairpin_tirn; + u8 match_level; struct mlx5_flow_table *hairpin_ft; }; +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1) + enum { - MLX5E_TC_FLOW_ESWITCH = BIT(0), - MLX5E_TC_FLOW_NIC = BIT(1), - MLX5E_TC_FLOW_OFFLOADED = BIT(2), - MLX5E_TC_FLOW_HAIRPIN = BIT(3), - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4), + MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS, + MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS, + MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE), + MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1), + MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2), + MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3), + MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), }; +#define MLX5E_TC_MAX_SPLITS 1 + struct mlx5e_tc_flow { struct rhash_head node; + struct mlx5e_priv *priv; u64 cookie; u8 flags; - struct mlx5_flow_handle *rule; + struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; struct list_head encap; /* flows sharing the same encap ID */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ struct list_head hairpin; /* flows sharing the same hairpin */ @@ -97,7 +106,7 @@ enum { }; #define MLX5E_TC_TABLE_NUM_GROUPS 4 -#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16) +#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16) struct mlx5e_hairpin { struct mlx5_hairpin *pair; @@ -607,7 +616,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, params.q_counter = priv->q_counter; /* set hairpin pair per each 50Gbs share of the link */ - mlx5e_get_max_linkspeed(priv->mdev, &link_speed); + mlx5e_port_max_linkspeed(priv->mdev, &link_speed); link_speed = max_t(u32, link_speed, 50000); link_speed64 = link_speed; do_div(link_speed64, 50000); @@ -753,7 +762,9 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, table_created = true; } - parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + if (attr->match_level != MLX5_MATCH_NONE) + parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, &flow_act, dest, dest_ix); @@ -785,11 +796,11 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_fc *counter = NULL; - counter = mlx5_flow_rule_counter(flow->rule); - mlx5_del_flow_rules(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); + mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); - if 
(!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { + if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } @@ -835,7 +846,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } err = mlx5_eswitch_add_vlan_action(esw, attr); @@ -860,9 +872,18 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); if (IS_ERR(rule)) goto err_add_rule; + + if (attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr); + if (IS_ERR(flow->rule[1])) + goto err_fwd_rule; + } } return rule; +err_fwd_rule: + mlx5_eswitch_del_offloaded_rule(esw, rule, attr); + rule = flow->rule[1]; err_add_rule: if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); @@ -883,7 +904,9 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } mlx5_eswitch_del_vlan_action(esw, attr); @@ -919,13 +942,25 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { esw_attr = flow->esw_attr; esw_attr->encap_id = e->encap_id; - flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", err); continue; } + + if (esw_attr->mirror_count) { + flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr); + if (IS_ERR(flow->rule[1])) { + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr); + err = PTR_ERR(flow->rule[1]); + mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n", + err); + continue; + } + } + flow->flags |= MLX5E_TC_FLOW_OFFLOADED; } } @@ -938,8 +973,12 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); + if (attr->mirror_count) + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr); + mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } } @@ -974,7 +1013,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) continue; list_for_each_entry(flow, &e->flows, encap) { if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { - counter = mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { neigh_used = true; @@ -982,6 +1021,8 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) } } } + if (neigh_used) + break; } if (neigh_used) { @@ -1190,7 +1231,7 @@ vxlan_match_offload_err: static int 
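
Context for __parse_cls_flower(), which resumes just below: the rework replaces the old min_inline computation with a match_level that records the deepest header layer the flow actually matches on (none, L2, L3 or L4), and parse_cls_flower() then rejects eswitch offload when the configured inline mode is shallower than that level. A toy version of the monotonic "raise the level while parsing" logic, with invented names:

#include <stdio.h>

enum match_level { MATCH_NONE, MATCH_L2, MATCH_L3, MATCH_L4 };

struct flow_keys {	/* which dissector keys carry a non-zero mask */
	int mac_mask, ip_mask, ports_mask;
};

/* Only ever raise the level; a later, shallower match must not lower it. */
static void raise_level(enum match_level *lvl, enum match_level to)
{
	if (*lvl < to)
		*lvl = to;
}

static enum match_level parse_flow(const struct flow_keys *k)
{
	enum match_level lvl = MATCH_NONE;

	if (k->mac_mask)
		raise_level(&lvl, MATCH_L2);
	if (k->ip_mask)
		raise_level(&lvl, MATCH_L3);
	if (k->ports_mask)
		raise_level(&lvl, MATCH_L4);
	return lvl;
}

int main(void)
{
	struct flow_keys k = { .mac_mask = 1, .ip_mask = 1, .ports_mask = 0 };
	enum match_level need = parse_flow(&k);
	int have = MATCH_L2;	/* pretend eswitch inline mode */

	if (have < need)
		printf("not offloaded: need level %d, have %d\n", need, have);
	else
		printf("offloadable at level %d\n", need);
	return 0;
}
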
__parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, - u8 *min_inline) + u8 *match_level) { void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); @@ -1199,7 +1240,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, u16 addr_type = 0; u8 ip_proto = 0; - *min_inline = MLX5_INLINE_MODE_L2; + *match_level = MLX5_MATCH_NONE; if (f->dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | @@ -1249,58 +1290,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); - - struct flow_dissector_key_control *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->mask); - addr_type = key->addr_type; - - /* the HW doesn't support frag first/later */ - if (mask->flags & FLOW_DIS_FIRST_FRAG) - return -EOPNOTSUPP; - - if (mask->flags & FLOW_DIS_IS_FRAGMENT) { - MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, - key->flags & FLOW_DIS_IS_FRAGMENT); - - /* the HW doesn't need L3 inline to match on frag=no */ - if (key->flags & FLOW_DIS_IS_FRAGMENT) - *min_inline = MLX5_INLINE_MODE_IP; - } - } - - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - ip_proto = key->ip_proto; - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(key->n_proto)); - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - mask->ip_proto); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - key->ip_proto); - - if (mask->ip_proto) - *min_inline = MLX5_INLINE_MODE_IP; - } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_dissector_key_eth_addrs *key = skb_flow_dissector_target(f->dissector, @@ -1324,6 +1313,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16), key->src); + + if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + *match_level = MLX5_MATCH_L2; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { @@ -1344,9 +1336,79 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); + + *match_level = MLX5_MATCH_L2; + } + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); + + if (mask->n_proto) + *match_level = MLX5_MATCH_L2; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + 
skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + + struct flow_dissector_key_control *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->mask); + addr_type = key->addr_type; + + /* the HW doesn't support frag first/later */ + if (mask->flags & FLOW_DIS_FIRST_FRAG) + return -EOPNOTSUPP; + + if (mask->flags & FLOW_DIS_IS_FRAGMENT) { + MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, + key->flags & FLOW_DIS_IS_FRAGMENT); + + /* the HW doesn't need L3 inline to match on frag=no */ + if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) + *match_level = MLX5_INLINE_MODE_L2; + /* *** L2 attributes parsing up to here *** */ + else + *match_level = MLX5_INLINE_MODE_IP; } } + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, + mask->ip_proto); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + key->ip_proto); + + if (mask->ip_proto) + *match_level = MLX5_MATCH_L3; + } + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_dissector_key_ipv4_addrs *key = skb_flow_dissector_target(f->dissector, @@ -1371,7 +1433,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, &key->dst, sizeof(key->dst)); if (mask->src || mask->dst) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { @@ -1400,7 +1462,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY || ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) { @@ -1428,9 +1490,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EOPNOTSUPP; if (mask->tos || mask->ttl) - *min_inline = MLX5_INLINE_MODE_IP; + *match_level = MLX5_MATCH_L3; } + /* *** L3 attributes parsing up to here *** */ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_dissector_key_ports *key = skb_flow_dissector_target(f->dissector, @@ -1471,7 +1535,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } if (mask->src || mask->dst) - *min_inline = MLX5_INLINE_MODE_TCP_UDP; + *match_level = MLX5_MATCH_L4; } if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) { @@ -1490,7 +1554,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, ntohs(key->flags)); if (mask->flags) - *min_inline = MLX5_INLINE_MODE_TCP_UDP; + *match_level = MLX5_MATCH_L4; } return 0; @@ -1505,23 +1569,28 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; - u8 min_inline; + u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, &min_inline); + err = __parse_cls_flower(priv, spec, f, &match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; if (rep->vport != FDB_UPLINK_VPORT && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < min_inline)) { + esw->offloads.inline_mode < match_level)) { netdev_warn(priv->netdev, "Flow is not offloaded due to min 
inline setting, required %d actual %d\n", - min_inline, esw->offloads.inline_mode); + match_level, esw->offloads.inline_mode); return -EOPNOTSUPP; } } + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + flow->esw_attr->match_level = match_level; + else + flow->nic_attr->match_level = match_level; + return err; } @@ -1578,7 +1647,6 @@ struct mlx5_fields { static struct mlx5_fields fields[] = { OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0), - OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0), OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0), OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0), OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0), @@ -1764,12 +1832,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, err = -EOPNOTSUPP; /* can't be all optimistic */ if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n"); + netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n"); goto out_err; } if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd); + netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd); goto out_err; } @@ -1793,8 +1861,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { cmd_masks = &masks[cmd]; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { - printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n", - cmd); + netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd); print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS, 16, 1, cmd_masks, sizeof(zero_masks), true); err = -EOPNOTSUPP; @@ -1917,21 +1984,21 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; LIST_HEAD(actions); + u32 action = 0; int err; if (!tcf_exts_has_actions(exts)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - attr->action = 0; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, flow_table_properties_nic_receive.flow_counter)) - attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } @@ -1941,13 +2008,13 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (err) return err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } if (is_tcf_csum(a)) { - if (csum_offload_supported(priv, attr->action, + if (csum_offload_supported(priv, action, tcf_csum_update_flags(a))) continue; @@ -1961,8 +2028,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, same_hw_devs(priv, netdev_priv(peer_dev))) { parse_attr->mirred_ifindex = peer_dev->ifindex; flow->flags |= MLX5E_TC_FLOW_HAIRPIN; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); @@ -1981,13 +2048,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } attr->flow_tag = mark; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + action |= 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; continue; } return -EINVAL; } + attr->action = action; if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; @@ -2044,6 +2112,20 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, return 0; } +static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, + struct net_device *peer_netdev) +{ + struct mlx5e_priv *peer_priv; + + peer_priv = netdev_priv(peer_netdev); + + return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && + (priv->netdev->netdev_ops == peer_netdev->netdev_ops) && + same_hw_devs(priv, peer_priv) && + MLX5_VPORT_MANAGER(peer_priv->mdev) && + (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); +} + static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct net_device *mirred_dev, struct net_device **out_dev, @@ -2459,60 +2541,71 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, const struct tc_action *a; LIST_HEAD(actions); bool encap = false; - int err = 0; + u32 action = 0; if (!tcf_exts_has_actions(exts)) return -EINVAL; - memset(attr, 0, sizeof(*attr)); attr->in_rep = rpriv->rep; + attr->in_mdev = priv->mdev; tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { if (is_tcf_gact_shot(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } if (is_tcf_pedit(a)) { + int err; + err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, parse_attr); if (err) return err; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + attr->mirror_count = attr->out_count; continue; } if (is_tcf_csum(a)) { - if (csum_offload_supported(priv, attr->action, + if (csum_offload_supported(priv, action, tcf_csum_update_flags(a))) continue; return -EOPNOTSUPP; } - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *out_dev; + if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) { struct mlx5e_priv *out_priv; + struct net_device *out_dev; out_dev = tcf_mirred_dev(a); + if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { + pr_err("can't support more than %d output ports, can't offload forwarding\n", + attr->out_count); + return -EOPNOTSUPP; + } + if (switchdev_port_same_parent_id(priv->netdev, - out_dev)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + out_dev) || + is_merged_eswitch_dev(priv, out_dev)) { + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - attr->out_rep = rpriv->rep; + attr->out_rep[attr->out_count] = rpriv->rep; + attr->out_mdev[attr->out_count++] = out_priv->mdev; } else if (encap) { parse_attr->mirred_ifindex = out_dev->ifindex; parse_attr->tun_info = *info; attr->parse_attr = parse_attr; - attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; /* attr->out_rep is resolved when we handle encap */ } else { pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", @@ -2528,14 +2621,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, encap = true; else return -EOPNOTSUPP; + attr->mirror_count = attr->out_count; continue; } if (is_tcf_vlan(a)) { if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { - 
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; + action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; + action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; attr->vlan_vid = tcf_vlan_push_vid(a); if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { attr->vlan_prio = tcf_vlan_push_prio(a); @@ -2549,38 +2643,84 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } else { /* action is TCA_VLAN_ACT_MODIFY */ return -EOPNOTSUPP; } + attr->mirror_count = attr->out_count; continue; } if (is_tcf_tunnel_release(a)) { - attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; + action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; continue; } return -EINVAL; } + attr->action = action; if (!actions_match_supported(priv, exts, parse_attr, flow)) return -EOPNOTSUPP; - return err; + if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static void get_flags(int flags, u8 *flow_flags) +{ + u8 __flow_flags = 0; + + if (flags & MLX5E_TC_INGRESS) + __flow_flags |= MLX5E_TC_FLOW_INGRESS; + if (flags & MLX5E_TC_EGRESS) + __flow_flags |= MLX5E_TC_FLOW_EGRESS; + + *flow_flags = __flow_flags; +} + +static const struct rhashtable_params tc_ht_params = { + .head_offset = offsetof(struct mlx5e_tc_flow, node), + .key_offset = offsetof(struct mlx5e_tc_flow, cookie), + .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *uplink_rpriv; + + if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) { + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + return &uplink_rpriv->tc_ht; + } else + return &priv->fs.tc.ht; } int mlx5e_configure_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; int attr_size, err = 0; u8 flow_flags = 0; + get_flags(flags, &flow_flags); + + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + if (flow) { + netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie); + return 0; + } + if (esw && esw->mode == SRIOV_OFFLOADS) { - flow_flags = MLX5E_TC_FLOW_ESWITCH; + flow_flags |= MLX5E_TC_FLOW_ESWITCH; attr_size = sizeof(struct mlx5_esw_flow_attr); } else { - flow_flags = MLX5E_TC_FLOW_NIC; + flow_flags |= MLX5E_TC_FLOW_NIC; attr_size = sizeof(struct mlx5_nic_flow_attr); } @@ -2593,6 +2733,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, flow->cookie = f->cookie; flow->flags = flow_flags; + flow->priv = priv; err = parse_cls_flower(priv, flow, &parse_attr->spec, f); if (err < 0) @@ -2602,16 +2743,16 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); } else { err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); if (err < 0) goto err_free; - flow->rule = mlx5e_tc_add_nic_flow(priv, 
parse_attr, flow); + flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow); } - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); + if (IS_ERR(flow->rule[0])) { + err = PTR_ERR(flow->rule[0]); if (err != -EAGAIN) goto err_free; } @@ -2623,8 +2764,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)) kvfree(parse_attr); - err = rhashtable_insert_fast(&tc->ht, &flow->node, - tc->ht_params); + err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) { mlx5e_tc_del_flow(priv, flow); kfree(flow); @@ -2638,18 +2778,28 @@ err_free: return err; } +#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS) +#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS) + +static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) +{ + if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK)) + return true; + + return false; +} + int mlx5e_delete_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; - struct mlx5e_tc_table *tc = &priv->fs.tc; - flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, - tc->ht_params); - if (!flow) + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + if (!flow || !same_flow_direction(flow, flags)) return -EINVAL; - rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); mlx5e_tc_del_flow(priv, flow); @@ -2659,24 +2809,23 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, } int mlx5e_stats_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, int flags) { - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct rhashtable *tc_ht = get_tc_ht(priv); struct mlx5e_tc_flow *flow; struct mlx5_fc *counter; u64 bytes; u64 packets; u64 lastuse; - flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, - tc->ht_params); - if (!flow) + flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + if (!flow || !same_flow_direction(flow, flags)) return -EINVAL; if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) return 0; - counter = mlx5_flow_rule_counter(flow->rule); + counter = mlx5_flow_rule_counter(flow->rule[0]); if (!counter) return 0; @@ -2687,41 +2836,50 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, return 0; } -static const struct rhashtable_params mlx5e_tc_flow_ht_params = { - .head_offset = offsetof(struct mlx5e_tc_flow, node), - .key_offset = offsetof(struct mlx5e_tc_flow, cookie), - .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), - .automatic_shrinking = true, -}; - -int mlx5e_tc_init(struct mlx5e_priv *priv) +int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; hash_init(tc->mod_hdr_tbl); hash_init(tc->hairpin_tbl); - tc->ht_params = mlx5e_tc_flow_ht_params; - return rhashtable_init(&tc->ht, &tc->ht_params); + return rhashtable_init(&tc->ht, &tc_ht_params); } static void _mlx5e_tc_del_flow(void *ptr, void *arg) { struct mlx5e_tc_flow *flow = ptr; - struct mlx5e_priv *priv = arg; + struct mlx5e_priv *priv = flow->priv; mlx5e_tc_del_flow(priv, flow); kfree(flow); } -void mlx5e_tc_cleanup(struct mlx5e_priv *priv) +void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) { struct mlx5e_tc_table *tc = &priv->fs.tc; - rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); + rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); if 
(!IS_ERR_OR_NULL(tc->t)) { mlx5_destroy_flow_table(tc->t); tc->t = NULL; } } + +int mlx5e_tc_esw_init(struct rhashtable *tc_ht) +{ + return rhashtable_init(tc_ht, &tc_ht_params); +} + +void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) +{ + rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); +} + +int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +{ + struct rhashtable *tc_ht = get_tc_ht(priv); + + return atomic_read(&tc_ht->nelems); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index c14c263a739b..49436bf3b80a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -38,16 +38,26 @@ #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff #ifdef CONFIG_MLX5_ESWITCH -int mlx5e_tc_init(struct mlx5e_priv *priv); -void mlx5e_tc_cleanup(struct mlx5e_priv *priv); + +enum { + MLX5E_TC_INGRESS = BIT(0), + MLX5E_TC_EGRESS = BIT(1), + MLX5E_TC_LAST_EXPORTED_BIT = 1, +}; + +int mlx5e_tc_nic_init(struct mlx5e_priv *priv); +void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); + +int mlx5e_tc_esw_init(struct rhashtable *tc_ht); +void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); int mlx5e_configure_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); int mlx5e_delete_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); int mlx5e_stats_flower(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f, int flags); struct mlx5e_encap_entry; void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, @@ -58,14 +68,11 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) -{ - return atomic_read(&priv->fs.tc.ht.nelems); -} +int mlx5e_tc_num_filters(struct mlx5e_priv *priv); #else /* CONFIG_MLX5_ESWITCH */ -static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; } -static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5532aa3675c7..f29deb44bf3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -35,12 +35,21 @@ #include <net/dsfield.h> #include "en.h" #include "ipoib/ipoib.h" -#include "en_accel/ipsec_rxtx.h" +#include "en_accel/en_accel.h" #include "lib/clock.h" #define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS + +#ifndef CONFIG_MLX5_EN_TLS #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ MLX5E_SQ_NOPS_ROOM) +#else +/* TLS offload requires MLX5E_SQ_STOP_ROOM to have + * enough room for a resync SKB, a normal SKB and a NOP + */ +#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\ + MLX5E_SQ_NOPS_ROOM) +#endif static inline void mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) @@ -179,28 +188,16 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, return min_t(u16, hlen, skb_headlen(skb)); } -static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, - unsigned int 
*skb_len, - unsigned int len) -{ - *skb_len -= len; - *skb_data += len; -} - -static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs, - unsigned char **skb_data, - unsigned int *skb_len) +static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) { struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start; int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, *skb_data, cpy1_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz); + memcpy(vhdr, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); - memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz); - mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz); + memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); } static inline void @@ -211,34 +208,31 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct if (skb->encapsulation) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM; - sq->stats.csum_partial_inner++; + sq->stats->csum_partial_inner++; } else { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; - sq->stats.csum_partial++; + sq->stats->csum_partial++; } } else - sq->stats.csum_none++; + sq->stats->csum_none++; } static inline u16 -mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes) +mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb) { + struct mlx5e_sq_stats *stats = sq->stats; u16 ihs; - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); - if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - sq->stats.tso_inner_packets++; - sq->stats.tso_inner_bytes += skb->len - ihs; + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; } else { ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - sq->stats.tso_packets++; - sq->stats.tso_bytes += skb->len - ihs; + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; } - *num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; return ihs; } @@ -291,17 +285,34 @@ dma_unmap_wqe_err: return -ENOMEM; } +static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, + struct mlx5_wq_cyc *wq, + u16 pi, u16 frag_pi) +{ + struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; + u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; + + edge_wi = wi + nnops; + + /* fill sq frag edge with nops to avoid wqe wrapping two pages */ + for (; wi < edge_wi; wi++) { + wi->skb = NULL; + wi->num_wqebbs = 1; + mlx5e_post_nop(wq, sq->sqn, &sq->pc); + } + sq->stats->nop += nnops; +} + static inline void mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, - u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg) { struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi; wi->num_bytes = num_bytes; wi->num_dma = num_dma; - wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + wi->num_wqebbs = num_wqebbs; wi->skb = skb; cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode); @@ -315,84 +326,108 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, sq->pc += wi->num_wqebbs; if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) { netif_tx_stop_queue(sq->txq); - sq->stats.stopped++; + sq->stats->stopped++; } if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) mlx5e_notify_hw(wq, sq->pc, 
sq->uar_map, cseg); - - /* fill sq edge with nops to avoid wqe wrap around */ - while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { - sq->db.wqe_info[pi].skb = NULL; - mlx5e_post_nop(wq, sq->sqn, &sq->pc); - sq->stats.nop++; - } } -static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, u16 pi) -{ - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; +#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; +netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe *wqe, u16 pi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; + struct mlx5e_sq_stats *stats = sq->stats; + u16 ds_cnt, ds_cnt_inl = 0; + u16 headlen, ihs, frag_pi; + u8 num_wqebbs, opcode; + u32 num_bytes; int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + __be16 mss; + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); - sq->stats.packets += skb_shinfo(skb)->gso_segs; + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; + + headlen = skb->len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + + if (ihs) { + ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN; + + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); + mlx5e_sq_fetch_wqe(sq, &wqe, &pi); + } + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; + if (ihs) { + eseg->inline_hdr.sz = cpu_to_be16(ihs); if (skb_vlan_tag_present(skb)) { - mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); - ihs += VLAN_HLEN; - sq->stats.added_vlan_packets++; + ihs -= VLAN_HLEN; + mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs); + stats->added_vlan_packets++; } else { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); } - eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } else if 
(skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); - sq->stats.added_vlan_packets++; + stats->added_vlan_packets++; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, cseg); return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -401,21 +436,19 @@ err_drop: netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)]; - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + struct mlx5e_tx_wqe *wqe; + struct mlx5e_txqsq *sq; + u16 pi; - memset(wqe, 0, sizeof(*wqe)); + sq = priv->txq2sq[skb_get_queue_mapping(skb)]; + mlx5e_sq_fetch_wqe(sq, &wqe, &pi); -#ifdef CONFIG_MLX5_EN_IPSEC - if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) { - skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb); - if (unlikely(!skb)) - return NETDEV_TX_OK; - } +#ifdef CONFIG_MLX5_ACCEL + /* might send skbs and update wqe and pi */ + skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi); + if (unlikely(!skb)) + return NETDEV_TX_OK; #endif - return mlx5e_sq_xmit(sq, skb, wqe, pi); } @@ -443,7 +476,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_txqsq, cq); - if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) return false; cqe = mlx5_cqwq_get_cqe(&cq->wq); @@ -478,7 +511,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) queue_work(cq->channel->priv->wq, &sq->recover.recover_work); } - sq->stats.cqe_err++; + sq->stats->cqe_err++; } do { @@ -489,7 +522,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) last_wqe = (sqcc == wqe_counter); - ci = sqcc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -538,7 +571,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) MLX5E_SQ_STOP_ROOM) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); - sq->stats.wake++; + sq->stats->wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); @@ -552,7 +585,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) int i; while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); wi = &sq->db.wqe_info[ci]; skb = wi->skb; @@ -574,18 +607,6 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) } #ifdef CONFIG_MLX5_CORE_IPOIB - -struct mlx5_wqe_eth_pad { - u8 rsvd0[16]; -}; - -struct mlx5i_tx_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_datagram_seg datagram; - struct mlx5_wqe_eth_pad pad; - struct mlx5_wqe_eth_seg eth; -}; - static inline void mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey, struct mlx5_wqe_datagram_seg *dseg) @@ -598,67 +619,92 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, 
u32 dqkey, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey) { - struct mlx5_wq_cyc *wq = &sq->wq; - u16 pi = sq->pc & wq->sz_m1; - struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); - struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi]; - - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - - unsigned char *skb_data = skb->data; - unsigned int skb_len = skb->len; - u8 opcode = MLX5_OPCODE_SEND; - unsigned int num_bytes; - int num_dma; - u16 headlen; - u16 ds_cnt; - u16 ihs; - - memset(wqe, 0, sizeof(*wqe)); + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5i_tx_wqe *wqe; - mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + struct mlx5_wqe_datagram_seg *datagram; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_wqe_eth_seg *eseg; + struct mlx5_wqe_data_seg *dseg; + struct mlx5e_tx_wqe_info *wi; - mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + struct mlx5e_sq_stats *stats = sq->stats; + u16 headlen, ihs, pi, frag_pi; + u16 ds_cnt, ds_cnt_inl = 0; + u8 num_wqebbs, opcode; + u32 num_bytes; + int num_dma; + __be16 mss; + /* Calc ihs and ds cnt, no writes to wqe yet */ + ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (skb_is_gso(skb)) { - opcode = MLX5_OPCODE_LSO; - ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes); - sq->stats.packets += skb_shinfo(skb)->gso_segs; + opcode = MLX5_OPCODE_LSO; + mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + ihs = mlx5e_tx_get_gso_ihs(sq, skb); + num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; + stats->packets += skb_shinfo(skb)->gso_segs; } else { - ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); + opcode = MLX5_OPCODE_SEND; + mss = 0; + ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb); num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); - sq->stats.packets++; + stats->packets++; } - sq->stats.bytes += num_bytes; - sq->stats.xmit_more += skb->xmit_more; + stats->bytes += num_bytes; + stats->xmit_more += skb->xmit_more; + + headlen = skb->len - ihs - skb->data_len; + ds_cnt += !!headlen; + ds_cnt += skb_shinfo(skb)->nr_frags; + + if (ihs) { + ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS); + ds_cnt += ds_cnt_inl; + } + + num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); + frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); + if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); + } + + mlx5i_sq_fetch_wqe(sq, &wqe, &pi); + + /* fill wqe */ + wi = &sq->db.wqe_info[pi]; + cseg = &wqe->ctrl; + datagram = &wqe->datagram; + eseg = &wqe->eth; + dseg = wqe->data; + + mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram); + + mlx5e_txwqe_build_eseg_csum(sq, skb, eseg); + + eseg->mss = mss; - ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; if (ihs) { - memcpy(eseg->inline_hdr.start, skb_data, ihs); - mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); + memcpy(eseg->inline_hdr.start, skb->data, ihs); eseg->inline_hdr.sz = cpu_to_be16(ihs); - ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); + dseg += ds_cnt_inl; } - headlen = skb_len - skb->data_len; - num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, - (struct mlx5_wqe_data_seg *)cseg + ds_cnt); + num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg); if (unlikely(num_dma < 0)) goto err_drop; - mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt 
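Editor's note: throughout this patch, open-coded ring indexing such as sq->pc & wq->sz_m1 is replaced by mlx5_wq_cyc_ctr2ix()/mlx5_wq_cyc_ctr2fragix(), which also understand the fragmented (multi-page) work queues being introduced. A minimal userspace model, assuming power-of-two ring and fragment sizes; the struct and function names are stand-ins for the wq.h helpers.

#include <assert.h>
#include <stdint.h>

struct wq_cyc {
	uint16_t sz;		/* total WQEBBs, power of two */
	uint16_t frag_sz;	/* WQEBBs per fragment (page), power of two */
};

/* counter -> slot anywhere in the ring */
static uint16_t ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
{
	return ctr & (wq->sz - 1);
}

/* counter -> offset inside the current fragment; a WQE is not allowed
 * to wrap a fragment, hence the nop fill at the edge.
 */
static uint16_t ctr2fragix(const struct wq_cyc *wq, uint16_t ctr)
{
	return ctr & (wq->frag_sz - 1);
}

int main(void)
{
	struct wq_cyc wq = { .sz = 1024, .frag_sz = 64 };

	assert(ctr2ix(&wq, 1025) == 1);
	/* only 3 WQEBBs left in this fragment; a 4-WQEBB WQE gets nops first */
	assert(wq.frag_sz - ctr2fragix(&wq, 61) == 3);
	return 0;
}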
+ num_dma, - num_bytes, num_dma, wi, cseg); + mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, + num_dma, wi, cseg); return NETDEV_TX_OK; err_drop: - sq->stats.dropped++; + stats->dropped++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index f292bb346985..1b17f682693b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -44,6 +44,32 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) return cpumask_test_cpu(current_cpu, aff); } +static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq) +{ + struct mlx5e_sq_stats *stats = sq->stats; + struct net_dim_sample dim_sample; + + if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state))) + return; + + net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes, + &dim_sample); + net_dim(&sq->dim, dim_sample); +} + +static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq) +{ + struct mlx5e_rq_stats *stats = rq->stats; + struct net_dim_sample dim_sample; + + if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state))) + return; + + net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes, + &dim_sample); + net_dim(&rq->dim, dim_sample); +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, @@ -75,18 +101,13 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) if (unlikely(!napi_complete_done(napi, work_done))) return work_done; - for (i = 0; i < c->num_tc; i++) + for (i = 0; i < c->num_tc; i++) { + mlx5e_handle_tx_dim(&c->sq[i]); mlx5e_cq_arm(&c->sq[i].cq); - - if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) { - struct net_dim_sample dim_sample; - net_dim_sample(c->rq.cq.event_ctr, - c->rq.stats.packets, - c->rq.stats.bytes, - &dim_sample); - net_dim(&c->rq.dim, dim_sample); } + mlx5e_handle_rx_dim(&c->rq); + mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 1814f803bd2c..406c23862f5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -144,6 +144,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_GPIO_EVENT"; case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; case MLX5_EVENT_TYPE_REMOTE_CONFIG: return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; case MLX5_EVENT_TYPE_DB_BF_CONGESTION: @@ -162,6 +164,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; case MLX5_EVENT_TYPE_FPGA_ERROR: return "MLX5_EVENT_TYPE_FPGA_ERROR"; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; case MLX5_EVENT_TYPE_GENERAL_EVENT: return "MLX5_EVENT_TYPE_GENERAL_EVENT"; default: @@ -396,6 +400,20 @@ static void general_event_handler(struct mlx5_core_dev *dev, } } +static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, + struct mlx5_eqe *eqe) +{ + u64 value_lsb; + u64 value_msb; + + value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); + + mlx5_core_warn(dev, + "High temperature on sensors with bit set %llx %llx", + value_msb, value_lsb); +} + /* caller must eventually call mlx5_cq_put on the returned cq */ 
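Editor's note: mlx5_temp_warning_event above logs only the raw 128-bit sensor bitmap split across two 64-bit words. If per-sensor indices are wanted, the two halves can be scanned as in this sketch; the sensor numbering is an assumption, since the event format only defines the mask.

#include <stdint.h>
#include <stdio.h>

/* Scan the msb:lsb 128-bit mask and report each set bit's index. */
static void report_hot_sensors(uint64_t msb, uint64_t lsb)
{
	for (int i = 0; i < 128; i++) {
		uint64_t half = (i < 64) ? lsb : msb;

		if (half & (1ULL << (i & 63)))
			printf("sensor %d over temperature\n", i);
	}
}

int main(void)
{
	report_hot_sensors(0, 0x5);	/* sensors 0 and 2 */
	return 0;
}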
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) { @@ -547,9 +565,14 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_FPGA_ERROR: + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); break; + case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + mlx5_temp_warning_event(dev, eqe); + break; + case MLX5_EVENT_TYPE_GENERAL_EVENT: general_event_handler(dev, eqe); break; @@ -822,10 +845,13 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT); if (MLX5_CAP_GEN(dev, fpga)) - async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR); + async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) | + (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR); if (MLX5_CAP_GEN_MAX(dev, dct)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_DCT_DRAINED); + if (MLX5_CAP_GEN(dev, temp_warn_event)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1352d13eedb3..6cab1dd66d1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -192,7 +192,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = vport; + dest.vport.num = vport; esw_debug(esw->dev, "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", @@ -200,7 +200,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, spec->match_criteria_enable = match_header; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = - mlx5_add_flow_rules(esw->fdb_table.fdb, spec, + mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { esw_warn(esw->dev, @@ -282,7 +282,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create FDB Table err %d\n", err); goto out; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.legacy.fdb = fdb; /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -343,9 +343,9 @@ out: mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); esw->fdb_table.legacy.addr_grp = NULL; } - if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) { + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; } } @@ -355,15 +355,15 @@ out: static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return; esw_debug(esw->dev, "Destroy FDB Table\n"); mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - mlx5_destroy_flow_table(esw->fdb_table.fdb); - esw->fdb_table.fdb = NULL; + mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; @@ -396,7 +396,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) fdb_add: /* SRIOV is enabled: Forward 
UC MAC to vport */ - if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) + if (esw->fdb_table.legacy.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", @@ -486,7 +486,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); @@ -526,7 +526,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.legacy.fdb) return 0; esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 4cd773fa55e3..b174da2884c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -55,6 +55,9 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) +#define mlx5_esw_has_fwd_fdb(dev) \ + MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -117,16 +120,18 @@ struct mlx5_vport { }; struct mlx5_eswitch_fdb { - void *fdb; union { struct legacy_fdb { + struct mlx5_flow_table *fdb; struct mlx5_flow_group *addr_grp; struct mlx5_flow_group *allmulti_grp; struct mlx5_flow_group *promisc_grp; } legacy; struct offloads_fdb { - struct mlx5_flow_table *fdb; + struct mlx5_flow_table *fast_fdb; + struct mlx5_flow_table *fwd_fdb; + struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *miss_grp; struct mlx5_flow_handle *miss_rule_uni; @@ -214,6 +219,10 @@ struct mlx5_flow_handle * mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr); +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr); void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, @@ -227,9 +236,24 @@ enum { SET_VLAN_INSERT = BIT(1) }; +enum mlx5_flow_match_level { + MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE, + MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2, + MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP, + MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP, +}; + +/* current maximum for flow based vport multicasting */ +#define MLX5_MAX_FLOW_FWD_VPORTS 2 + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; - struct mlx5_eswitch_rep *out_rep; + struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5_core_dev *in_mdev; + + int mirror_count; + int out_count; int action; __be16 vlan_proto; @@ -238,6 +262,7 @@ struct mlx5_esw_flow_attr { bool vlan_handled; u32 encap_id; u32 mod_hdr_id; + u8 match_level; struct mlx5e_tc_flow_parse_attr *parse_attr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 35e256eb2f6e..cecd201f0b73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -48,16 +48,22 @@ 
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec, struct mlx5_esw_flow_attr *attr) { - struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_table *ft = NULL; struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; + int j, i = 0; void *misc; - int i = 0; if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); + if (attr->mirror_count) + ft = esw->fdb_table.offloads.fwd_fdb; + else + ft = esw->fdb_table.offloads.fast_fdb; + flow_act.action = attr->action; /* if per flow vlan pop/push is emulated, don't set that into the firmware */ if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) @@ -70,9 +76,14 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport_num = attr->out_rep->vport; - i++; + for (j = attr->mirror_count; j < attr->out_count; j++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[j]->vport; + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + i++; + } } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(esw->dev, true); @@ -88,11 +99,23 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + if (attr->match_level == MLX5_MATCH_NONE) + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + else + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS; if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; @@ -102,8 +125,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) flow_act.encap_id = attr->encap_id; - rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb, - spec, &flow_act, dest, i); + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, i); if (IS_ERR(rule)) goto err_add_rule; else @@ -117,6 +139,57 @@ err_counter_alloc: return rule; } +struct mlx5_flow_handle * +mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + struct mlx5_esw_flow_attr *attr) +{ + struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle *rule; + void *misc; + int i; + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + for (i = 0; i < attr->mirror_count; i++) { + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest[i].vport.num = attr->out_rep[i]->vport; + dest[i].vport.vhca_id = + MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); + dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + } + 
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = esw->fdb_table.offloads.fwd_fdb; + i++; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport); + + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(attr->in_mdev, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + if (attr->match_level == MLX5_MATCH_NONE) + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + else + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; + + rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fast_fdb, spec, &flow_act, dest, i); + + if (!IS_ERR(rule)) + esw->offloads.num_flows++; + + return rule; +} + void mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule, @@ -156,7 +229,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push) vport = in_rep; @@ -177,7 +250,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, goto out_notsupp; in_rep = attr->in_rep; - out_rep = attr->out_rep; + out_rep = attr->out_rep[0]; if (push && in_rep->vport == FDB_UPLINK_VPORT) goto out_notsupp; @@ -228,7 +301,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) { + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -288,7 +361,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep->vport == FDB_UPLINK_VPORT) + if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) vport->vlan_refcount--; return 0; @@ -343,10 +416,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = vport; + dest.vport.num = vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); @@ -387,10 +460,10 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_c[0] = 0x01; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport_num = 0; + dest.vport.num = 0; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -405,7 +478,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16); dmac_v[0] = 0x01; - flow_rule =
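Editor's note: the fwd rule above packs its destination array in a fixed order, mirror vports first, each carrying the owning eswitch's vhca_id only when the merged_eswitch capability is set, then one final destination that chains into the fwd table where the regular forward destinations are matched. A simplified model of that packing; the struct and field names are stand-ins for the mlx5 flow-steering types.

#include <stdio.h>

#define MAX_FWD_VPORTS 2

enum dest_type { DEST_VPORT, DEST_FLOW_TABLE };

struct dest {
	enum dest_type type;
	int vport;
	int vhca_id;		/* meaningful only when vhca_id_valid is set */
	int vhca_id_valid;
};

/* Mirror destinations first, then one flow-table entry that chains to
 * the fwd table where the regular forward destinations are applied.
 */
static int build_mirror_dests(struct dest *dest, int mirror_count,
			      const int *vports, const int *vhca_ids,
			      int merged_eswitch)
{
	int i;

	for (i = 0; i < mirror_count; i++) {
		dest[i].type = DEST_VPORT;
		dest[i].vport = vports[i];
		dest[i].vhca_id = vhca_ids[i];
		dest[i].vhca_id_valid = !!merged_eswitch;
	}
	dest[i].type = DEST_FLOW_TABLE;
	return i + 1;
}

int main(void)
{
	struct dest dests[MAX_FWD_VPORTS + 1];
	int vports[] = { 1 };
	int vhca_ids[] = { 7 };
	int n = build_mirror_dests(dests, 1, vports, vhca_ids, 1);

	printf("%d destinations, last one chains to the fwd table\n", n);
	return 0;
}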
mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec, + flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -437,7 +510,7 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); err = -EOPNOTSUPP; - goto out; + goto out_namespace; } esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", @@ -447,6 +520,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + if (mlx5_esw_has_fwd_fdb(dev)) + esw_size >>= 1; + if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) flags |= MLX5_FLOW_TABLE_TUNNEL_EN; @@ -457,17 +533,37 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) if (IS_ERR(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); - goto out; + goto out_namespace; } - esw->fdb_table.fdb = fdb; + esw->fdb_table.offloads.fast_fdb = fdb; -out: + if (!mlx5_esw_has_fwd_fdb(dev)) + goto out_namespace; + + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, + esw_size, + ESW_OFFLOADS_NUM_GROUPS, 1, + flags); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create fwd table err %d\n", err); + goto out_ft; + } + esw->fdb_table.offloads.fwd_fdb = fdb; + + return err; + +out_ft: + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); +out_namespace: return err; } static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw) { - mlx5_destroy_flow_table(esw->fdb_table.fdb); + if (mlx5_esw_has_fwd_fdb(esw->dev)) + mlx5_destroy_flow_table(esw->fdb_table.offloads.fwd_fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.fast_fdb); } #define MAX_PF_SQ 256 @@ -513,7 +609,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); goto slow_fdb_err; } - esw->fdb_table.offloads.fdb = fdb; + esw->fdb_table.offloads.slow_fdb = fdb; /* create send-to-vport group */ memset(flow_group_in, 0, inlen); @@ -569,9 +665,9 @@ miss_rule_err: miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); slow_fdb_err: - mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw_destroy_offloads_fast_fdb_table(esw); fast_fdb_err: ns_err: kvfree(flow_group_in); @@ -580,7 +676,7 @@ ns_err: static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) { - if (!esw->fdb_table.fdb) + if (!esw->fdb_table.offloads.fast_fdb) return; esw_debug(esw->dev, "Destroy offloads FDB Tables\n"); @@ -589,7 +685,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); - mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); esw_destroy_offloads_fast_fdb_table(esw); } @@ -663,7 +759,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) esw->offloads.vport_rx_group = g; out: - kfree(flow_group_in); + kvfree(flow_group_in); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h 
b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index d05233c9b4f6..eb8b0fe0b4e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h @@ -35,6 +35,13 @@ #include <linux/mlx5/driver.h> +enum mlx5_fpga_device_id { + MLX5_FPGA_DEVICE_UNKNOWN = 0, + MLX5_FPGA_DEVICE_KU040 = 1, + MLX5_FPGA_DEVICE_KU060 = 2, + MLX5_FPGA_DEVICE_KU060_2 = 3, +}; + enum mlx5_fpga_image { MLX5_FPGA_IMAGE_USER = 0, MLX5_FPGA_IMAGE_FACTORY, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index de7fe087d6fe..4138a770ed57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -181,6 +181,7 @@ int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn, if (!conn->qp.active) return -ENOTCONN; + buf->dma_dir = DMA_TO_DEVICE; err = mlx5_fpga_conn_map_buf(conn, buf); if (err) return err; @@ -255,8 +256,6 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1); buf = conn->qp.rq.bufs[ix]; conn->qp.rq.bufs[ix] = NULL; - if (!status) - buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); conn->qp.rq.cc++; if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR))) @@ -274,6 +273,7 @@ static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn, return; } + buf->sg[0].size = be32_to_cpu(cqe->byte_cnt); mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n", buf->sg[0].size); conn->recv_cb(conn->cb_arg, buf); @@ -454,7 +454,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) } inlen = MLX5_ST_SZ_BYTES(create_cq_in) + - sizeof(u64) * conn->cq.wq_ctrl.frag_buf.npages; + sizeof(u64) * conn->cq.wq_ctrl.buf.npages; in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; @@ -469,12 +469,12 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); - MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.frag_buf.page_shift - + MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma); pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); - mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.frag_buf, pas); + mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas); err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen); kvfree(in); @@ -500,7 +500,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) goto out; err_cqwq: - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); out: return err; } @@ -510,7 +510,7 @@ static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn) tasklet_disable(&conn->cq.tasklet); tasklet_kill(&conn->cq.tasklet); mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq); - mlx5_cqwq_destroy(&conn->cq.wq_ctrl); + mlx5_wq_destroy(&conn->cq.wq_ctrl); } static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc) @@ -591,8 +591,8 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn, if (MLX5_CAP_GEN(mdev, cqe_version) == 1) MLX5_SET(qpc, qpc, user_index, 0xFFFFFF); - mlx5_fill_page_array(&conn->qp.wq_ctrl.buf, - (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); + mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas)); err = 
mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h index 44bd9eccc711..634ae10e287b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h @@ -54,7 +54,7 @@ struct mlx5_fpga_conn { /* CQ */ struct { struct mlx5_cqwq wq; - struct mlx5_frag_wq_ctrl wq_ctrl; + struct mlx5_wq_ctrl wq_ctrl; struct mlx5_core_cq mcq; struct tasklet_struct tasklet; } cq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index dc8970346521..436a8136f26f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -50,6 +50,11 @@ static const char *const mlx5_fpga_error_strings[] = { "Temperature Critical", }; +static const char * const mlx5_fpga_qp_error_strings[] = { + "Null Syndrome", + "Retry Counter Expired", + "RNR Expired", +}; static struct mlx5_fpga_device *mlx5_fpga_device_alloc(void) { struct mlx5_fpga_device *fdev = NULL; @@ -75,6 +80,21 @@ static const char *mlx5_fpga_image_name(enum mlx5_fpga_image image) } } +static const char *mlx5_fpga_device_name(u32 device) +{ + switch (device) { + case MLX5_FPGA_DEVICE_KU040: + return "ku040"; + case MLX5_FPGA_DEVICE_KU060: + return "ku060"; + case MLX5_FPGA_DEVICE_KU060_2: + return "ku060_2"; + case MLX5_FPGA_DEVICE_UNKNOWN: + default: + return "unknown"; + } +} + static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev) { struct mlx5_fpga_query query; @@ -128,8 +148,9 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; - unsigned long flags; unsigned int max_num_qps; + unsigned long flags; + u32 fpga_device_id; int err; if (!fdev) @@ -143,12 +164,23 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) if (err) goto out; - mlx5_fpga_info(fdev, "device %u; %s image, version %u\n", - MLX5_CAP_FPGA(fdev->mdev, fpga_device), + fpga_device_id = MLX5_CAP_FPGA(fdev->mdev, fpga_device); + mlx5_fpga_info(fdev, "%s:%u; %s image, version %u; SBU %06x:%04x version %d\n", + mlx5_fpga_device_name(fpga_device_id), + fpga_device_id, mlx5_fpga_image_name(fdev->last_oper_image), - MLX5_CAP_FPGA(fdev->mdev, image_version)); + MLX5_CAP_FPGA(fdev->mdev, image_version), + MLX5_CAP_FPGA(fdev->mdev, ieee_vendor_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_id), + MLX5_CAP_FPGA(fdev->mdev, sandbox_product_version)); max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps); + if (!max_num_qps) { + mlx5_fpga_err(fdev, "FPGA reports 0 QPs in SHELL_CAPS\n"); + err = -ENOTSUPP; + goto out; + } + err = mlx5_core_reserve_gids(mdev, max_num_qps); if (err) goto out; @@ -244,23 +276,38 @@ static const char *mlx5_fpga_syndrome_to_string(u8 syndrome) return "Unknown"; } +static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome) +{ + if (syndrome < ARRAY_SIZE(mlx5_fpga_qp_error_strings)) + return mlx5_fpga_qp_error_strings[syndrome]; + return "Unknown"; +} + void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) { struct mlx5_fpga_device *fdev = mdev->fpga; const char *event_name; bool teardown = false; unsigned long flags; + u32 fpga_qpn; u8 syndrome; - if (event != MLX5_EVENT_TYPE_FPGA_ERROR) { + switch (event) { + case MLX5_EVENT_TYPE_FPGA_ERROR: + syndrome = MLX5_GET(fpga_error_event, data, syndrome); + event_name 
= mlx5_fpga_syndrome_to_string(syndrome); + break; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); + event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); + fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); + break; + default: mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", event); return; } - syndrome = MLX5_GET(fpga_error_event, data, syndrome); - event_name = mlx5_fpga_syndrome_to_string(syndrome); - spin_lock_irqsave(&fdev->state_lock, flags); switch (fdev->state) { case MLX5_FPGA_STATUS_SUCCESS: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 82405ed84725..3e2355c8df3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -53,6 +53,7 @@ struct mlx5_fpga_device { } conn_res; struct mlx5_fpga_ipsec *ipsec; + struct mlx5_fpga_tls *tls; }; #define mlx5_fpga_dbg(__adev, format, ...) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index fad8c2e3804e..a0433b48e833 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -43,9 +43,6 @@ #include "fpga/sdk.h" #include "fpga/core.h" -#define SBU_QP_QUEUE_SIZE 8 -#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC (60 * 1000) - enum mlx5_fpga_ipsec_cmd_status { MLX5_FPGA_IPSEC_CMD_PENDING, MLX5_FPGA_IPSEC_CMD_SEND_FAIL, @@ -256,7 +253,7 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx) { struct mlx5_fpga_ipsec_cmd_context *context = ctx; unsigned long timeout = - msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC); + msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC); int res; res = wait_for_completion_timeout(&context->complete, timeout); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h index baa537e54a49..656f96be6e20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/sdk.h @@ -41,9 +41,17 @@ * DOC: Innova SDK * This header defines the in-kernel API for Innova FPGA client drivers. */ +#define SBU_QP_QUEUE_SIZE 8 +#define MLX5_FPGA_CMD_TIMEOUT_MSEC (60 * 1000) +/** + * enum mlx5_fpga_access_type - Enumerated the different methods possible for + * accessing the device memory address space + */ enum mlx5_fpga_access_type { + /** Use the slow CX-FPGA I2C bus */ MLX5_FPGA_ACCESS_TYPE_I2C = 0x0, + /** Use the fastest available method */ MLX5_FPGA_ACCESS_TYPE_DONTCARE = 0x0, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c new file mode 100644 index 000000000000..c9736238604a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/mlx5/device.h> +#include "fpga/tls.h" +#include "fpga/cmd.h" +#include "fpga/sdk.h" +#include "fpga/core.h" +#include "accel/tls.h" + +struct mlx5_fpga_tls_command_context; + +typedef void (*mlx5_fpga_tls_command_complete) + (struct mlx5_fpga_conn *conn, struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *ctx, + struct mlx5_fpga_dma_buf *resp); + +struct mlx5_fpga_tls_command_context { + struct list_head list; + /* There is no guarantee on the order between the TX completion + * and the command response. + * The TX completion is going to touch cmd->buf even in + * the case of successful transmission. 
+ * So instead of requiring separate allocations for cmd + * and cmd->buf we've decided to use a reference counter + */ + refcount_t ref; + struct mlx5_fpga_dma_buf buf; + mlx5_fpga_tls_command_complete complete; +}; + +static void +mlx5_fpga_tls_put_command_ctx(struct mlx5_fpga_tls_command_context *ctx) +{ + if (refcount_dec_and_test(&ctx->ref)) + kfree(ctx); +} + +static void mlx5_fpga_tls_cmd_complete(struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_fpga_conn *conn = fdev->tls->conn; + struct mlx5_fpga_tls_command_context *ctx; + struct mlx5_fpga_tls *tls = fdev->tls; + unsigned long flags; + + spin_lock_irqsave(&tls->pending_cmds_lock, flags); + ctx = list_first_entry(&tls->pending_cmds, + struct mlx5_fpga_tls_command_context, list); + list_del(&ctx->list); + spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); + ctx->complete(conn, fdev, ctx, resp); +} + +static void mlx5_fpga_cmd_send_complete(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_dma_buf *buf, + u8 status) +{ + struct mlx5_fpga_tls_command_context *ctx = + container_of(buf, struct mlx5_fpga_tls_command_context, buf); + + mlx5_fpga_tls_put_command_ctx(ctx); + + if (unlikely(status)) + mlx5_fpga_tls_cmd_complete(fdev, NULL); +} + +static void mlx5_fpga_tls_cmd_send(struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + mlx5_fpga_tls_command_complete complete) +{ + struct mlx5_fpga_tls *tls = fdev->tls; + unsigned long flags; + int ret; + + refcount_set(&cmd->ref, 2); + cmd->complete = complete; + cmd->buf.complete = mlx5_fpga_cmd_send_complete; + + spin_lock_irqsave(&tls->pending_cmds_lock, flags); + /* mlx5_fpga_sbu_conn_sendmsg is called under pending_cmds_lock + * to make sure commands are inserted to the tls->pending_cmds list + * and the command QP in the same order. + */ + ret = mlx5_fpga_sbu_conn_sendmsg(tls->conn, &cmd->buf); + if (likely(!ret)) + list_add_tail(&cmd->list, &tls->pending_cmds); + else + complete(tls->conn, fdev, cmd, NULL); + spin_unlock_irqrestore(&tls->pending_cmds_lock, flags); +} + +/* Start of context identifiers range (inclusive) */ +#define SWID_START 0 +/* End of context identifiers range (exclusive) */ +#define SWID_END BIT(24) + +static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock, + void *ptr) +{ + int ret; + + /* TLS metadata format is 1 byte for syndrome followed + * by 3 bytes of swid (software ID) + * swid must not exceed 3 bytes. 
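Editor's note: the command context described above is created with a reference count of two; the TX completion and the FPGA response each drop one, so the shared buffer stays valid no matter which of the two fires first. A userspace model of that lifetime, using C11 atomics in place of the kernel's refcount_t.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd_ctx {
	atomic_int ref;
	/* command buffer would live here */
};

static void put_ctx(struct cmd_ctx *ctx, const char *who)
{
	if (atomic_fetch_sub(&ctx->ref, 1) == 1) {
		printf("%s dropped the last reference, freeing\n", who);
		free(ctx);
	}
}

int main(void)
{
	struct cmd_ctx *ctx = malloc(sizeof(*ctx));

	if (!ctx)
		return 1;
	atomic_init(&ctx->ref, 2);	/* one for TX completion, one for the response */

	/* the two events may run in either order; the second one frees */
	put_ctx(ctx, "tx completion");
	put_ctx(ctx, "response");
	return 0;
}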
+ * See tls_rxtx.c:insert_pet() for details + */ + BUILD_BUG_ON((SWID_END - 1) & 0xFF000000); + + idr_preload(GFP_KERNEL); + spin_lock_irq(idr_spinlock); + ret = idr_alloc(idr, ptr, SWID_START, SWID_END, GFP_ATOMIC); + spin_unlock_irq(idr_spinlock); + idr_preload_end(); + + return ret; +} + +static void mlx5_fpga_tls_release_swid(struct idr *idr, + spinlock_t *idr_spinlock, u32 swid) +{ + unsigned long flags; + + spin_lock_irqsave(idr_spinlock, flags); + idr_remove(idr, swid); + spin_unlock_irqrestore(idr_spinlock, flags); +} + +struct mlx5_teardown_stream_context { + struct mlx5_fpga_tls_command_context cmd; + u32 swid; +}; + +static void +mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_teardown_stream_context *ctx = + container_of(cmd, struct mlx5_teardown_stream_context, cmd); + + if (resp) { + u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); + + if (syndrome) + mlx5_fpga_err(fdev, + "Teardown stream failed with syndrome = %d", + syndrome); + else + mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr, + &fdev->tls->idr_spinlock, + ctx->swid); + } + mlx5_fpga_tls_put_command_ctx(cmd); +} + +static void mlx5_fpga_tls_flow_to_cmd(void *flow, void *cmd) +{ + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, src_port), flow, + MLX5_BYTE_OFF(tls_flow, ipv6)); + + MLX5_SET(tls_cmd, cmd, ipv6, MLX5_GET(tls_flow, flow, ipv6)); + MLX5_SET(tls_cmd, cmd, direction_sx, + MLX5_GET(tls_flow, flow, direction_sx)); +} + +static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, + void *flow, u32 swid, gfp_t flags) +{ + struct mlx5_teardown_stream_context *ctx; + struct mlx5_fpga_dma_buf *buf; + void *cmd; + + ctx = kzalloc(sizeof(*ctx) + MLX5_TLS_COMMAND_SIZE, flags); + if (!ctx) + return; + + buf = &ctx->cmd.buf; + cmd = (ctx + 1); + MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); + MLX5_SET(tls_cmd, cmd, swid, swid); + + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + kfree(flow); + + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + + ctx->swid = swid; + mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_teardown_completion); +} + +void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags) +{ + struct mlx5_fpga_tls *tls = mdev->fpga->tls; + void *flow; + + rcu_read_lock(); + flow = idr_find(&tls->tx_idr, swid); + rcu_read_unlock(); + + if (!flow) { + mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", + swid); + return; + } + + mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); +} + +enum mlx5_fpga_setup_stream_status { + MLX5_FPGA_CMD_PENDING, + MLX5_FPGA_CMD_SEND_FAILED, + MLX5_FPGA_CMD_RESPONSE_RECEIVED, + MLX5_FPGA_CMD_ABANDONED, +}; + +struct mlx5_setup_stream_context { + struct mlx5_fpga_tls_command_context cmd; + atomic_t status; + u32 syndrome; + struct completion comp; +}; + +static void +mlx5_fpga_tls_setup_completion(struct mlx5_fpga_conn *conn, + struct mlx5_fpga_device *fdev, + struct mlx5_fpga_tls_command_context *cmd, + struct mlx5_fpga_dma_buf *resp) +{ + struct mlx5_setup_stream_context *ctx = + container_of(cmd, struct mlx5_setup_stream_context, cmd); + int status = MLX5_FPGA_CMD_SEND_FAILED; + void *tls_cmd = ctx + 1; + + /* If we failed to send the command, resp == NULL */ + if (resp) { + ctx->syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); + status = MLX5_FPGA_CMD_RESPONSE_RECEIVED; + } + + status = atomic_xchg_release(&ctx->status, status); + if
(likely(status != MLX5_FPGA_CMD_ABANDONED)) { + complete(&ctx->comp); + return; + } + + mlx5_fpga_err(fdev, "Command was abandoned, syndrome = %u\n", + ctx->syndrome); + + if (!ctx->syndrome) { + /* The process was killed while waiting for the context to be + * added, and the add completed successfully. + * We need to destroy the HW context, and we can't reuse + * the command context because we might not have received + * the tx completion yet. + */ + mlx5_fpga_tls_del_tx_flow(fdev->mdev, + MLX5_GET(tls_cmd, tls_cmd, swid), + GFP_ATOMIC); + } + + mlx5_fpga_tls_put_command_ctx(cmd); +} + +static int mlx5_fpga_tls_setup_stream_cmd(struct mlx5_core_dev *mdev, + struct mlx5_setup_stream_context *ctx) +{ + struct mlx5_fpga_dma_buf *buf; + void *cmd = ctx + 1; + int status, ret = 0; + + buf = &ctx->cmd.buf; + buf->sg[0].data = cmd; + buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; + MLX5_SET(tls_cmd, cmd, command_type, CMD_SETUP_STREAM); + + init_completion(&ctx->comp); + atomic_set(&ctx->status, MLX5_FPGA_CMD_PENDING); + ctx->syndrome = -1; + + mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd, + mlx5_fpga_tls_setup_completion); + wait_for_completion_killable(&ctx->comp); + + status = atomic_xchg_acquire(&ctx->status, MLX5_FPGA_CMD_ABANDONED); + if (unlikely(status == MLX5_FPGA_CMD_PENDING)) + /* ctx is going to be released in mlx5_fpga_tls_setup_completion */ + return -EINTR; + + if (unlikely(ctx->syndrome)) + ret = -ENOMEM; + + mlx5_fpga_tls_put_command_ctx(&ctx->cmd); + return ret; +} + +static void mlx5_fpga_tls_hw_qp_recv_cb(void *cb_arg, + struct mlx5_fpga_dma_buf *buf) +{ + struct mlx5_fpga_device *fdev = (struct mlx5_fpga_device *)cb_arg; + + mlx5_fpga_tls_cmd_complete(fdev, buf); +} + +bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev) +{ + if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga)) + return false; + + if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) != + MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX) + return false; + + if (MLX5_CAP_FPGA(mdev, sandbox_product_id) != + MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_TLS) + return false; + + if (MLX5_CAP_FPGA(mdev, sandbox_product_version) != 0) + return false; + + return true; +} + +static int mlx5_fpga_tls_get_caps(struct mlx5_fpga_device *fdev, + u32 *p_caps) +{ + int err, cap_size = MLX5_ST_SZ_BYTES(tls_extended_cap); + u32 caps = 0; + void *buf; + + buf = kzalloc(cap_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = mlx5_fpga_get_sbu_caps(fdev, cap_size, buf); + if (err) + goto out; + + if (MLX5_GET(tls_extended_cap, buf, tx)) + caps |= MLX5_ACCEL_TLS_TX; + if (MLX5_GET(tls_extended_cap, buf, rx)) + caps |= MLX5_ACCEL_TLS_RX; + if (MLX5_GET(tls_extended_cap, buf, tls_v12)) + caps |= MLX5_ACCEL_TLS_V12; + if (MLX5_GET(tls_extended_cap, buf, tls_v13)) + caps |= MLX5_ACCEL_TLS_V13; + if (MLX5_GET(tls_extended_cap, buf, lro)) + caps |= MLX5_ACCEL_TLS_LRO; + if (MLX5_GET(tls_extended_cap, buf, ipv6)) + caps |= MLX5_ACCEL_TLS_IPV6; + + if (MLX5_GET(tls_extended_cap, buf, aes_gcm_128)) + caps |= MLX5_ACCEL_TLS_AES_GCM128; + if (MLX5_GET(tls_extended_cap, buf, aes_gcm_256)) + caps |= MLX5_ACCEL_TLS_AES_GCM256; + + *p_caps = caps; + err = 0; +out: + kfree(buf); + return err; +} + +int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + struct mlx5_fpga_conn_attr init_attr = {0}; + struct mlx5_fpga_conn *conn; + struct mlx5_fpga_tls *tls; + int err = 0; + + if (!mlx5_fpga_is_tls_device(mdev) || !fdev) + return 0; + + tls = kzalloc(sizeof(*tls), GFP_KERNEL); + if (!tls) + return -ENOMEM; + + err =
mlx5_fpga_tls_get_caps(fdev, &tls->caps); + if (err) + goto error; + + if (!(tls->caps & (MLX5_ACCEL_TLS_TX | MLX5_ACCEL_TLS_V12 | + MLX5_ACCEL_TLS_AES_GCM128))) { + err = -ENOTSUPP; + goto error; + } + + init_attr.rx_size = SBU_QP_QUEUE_SIZE; + init_attr.tx_size = SBU_QP_QUEUE_SIZE; + init_attr.recv_cb = mlx5_fpga_tls_hw_qp_recv_cb; + init_attr.cb_arg = fdev; + conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr); + if (IS_ERR(conn)) { + err = PTR_ERR(conn); + mlx5_fpga_err(fdev, "Error creating TLS command connection %d\n", + err); + goto error; + } + + tls->conn = conn; + spin_lock_init(&tls->pending_cmds_lock); + INIT_LIST_HEAD(&tls->pending_cmds); + + idr_init(&tls->tx_idr); + spin_lock_init(&tls->idr_spinlock); + fdev->tls = tls; + return 0; + +error: + kfree(tls); + return err; +} + +void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev) +{ + struct mlx5_fpga_device *fdev = mdev->fpga; + + if (!fdev || !fdev->tls) + return; + + mlx5_fpga_sbu_conn_destroy(fdev->tls->conn); + kfree(fdev->tls); + fdev->tls = NULL; +} + +static void mlx5_fpga_tls_set_aes_gcm128_ctx(void *cmd, + struct tls_crypto_info *info, + __be64 *rcd_sn) +{ + struct tls12_crypto_info_aes_gcm_128 *crypto_info = + (struct tls12_crypto_info_aes_gcm_128 *)info; + + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_rcd_sn), crypto_info->rec_seq, + TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); + + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, tls_implicit_iv), + crypto_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key), + crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); + + /* in AES-GCM 128 we need to write the key twice */ + memcpy(MLX5_ADDR_OF(tls_cmd, cmd, encryption_key) + + TLS_CIPHER_AES_GCM_128_KEY_SIZE, + crypto_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); + + MLX5_SET(tls_cmd, cmd, alg, MLX5_TLS_ALG_AES_GCM_128); +} + +static int mlx5_fpga_tls_set_key_material(void *cmd, u32 caps, + struct tls_crypto_info *crypto_info) +{ + __be64 rcd_sn; + + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: + if (!(caps & MLX5_ACCEL_TLS_AES_GCM128)) + return -EINVAL; + mlx5_fpga_tls_set_aes_gcm128_ctx(cmd, crypto_info, &rcd_sn); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int mlx5_fpga_tls_add_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, u32 swid, + u32 tcp_sn) +{ + u32 caps = mlx5_fpga_tls_device_caps(mdev); + struct mlx5_setup_stream_context *ctx; + int ret = -ENOMEM; + size_t cmd_size; + void *cmd; + + cmd_size = MLX5_TLS_COMMAND_SIZE + sizeof(*ctx); + ctx = kzalloc(cmd_size, GFP_KERNEL); + if (!ctx) + goto out; + + cmd = ctx + 1; + ret = mlx5_fpga_tls_set_key_material(cmd, caps, crypto_info); + if (ret) + goto free_ctx; + + mlx5_fpga_tls_flow_to_cmd(flow, cmd); + + MLX5_SET(tls_cmd, cmd, swid, swid); + MLX5_SET(tls_cmd, cmd, tcp_sn, tcp_sn); + + return mlx5_fpga_tls_setup_stream_cmd(mdev, ctx); + +free_ctx: + kfree(ctx); +out: + return ret; +} + +int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid) +{ + struct mlx5_fpga_tls *tls = mdev->fpga->tls; + int ret = -ENOMEM; + u32 swid; + + ret = mlx5_fpga_tls_alloc_swid(&tls->tx_idr, &tls->idr_spinlock, flow); + if (ret < 0) + return ret; + + swid = ret; + MLX5_SET(tls_flow, flow, direction_sx, 1); + + ret = mlx5_fpga_tls_add_flow(mdev, flow, crypto_info, swid, + start_offload_tcp_sn); + if (ret && ret != -EINTR) + goto free_swid; + + *p_swid = swid; + return 0; +free_swid: + 
mlx5_fpga_tls_release_swid(&tls->tx_idr, &tls->idr_spinlock, swid); + + return ret; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h new file mode 100644 index 000000000000..800a214e4e49 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __MLX5_FPGA_TLS_H__ +#define __MLX5_FPGA_TLS_H__ + +#include <linux/mlx5/driver.h> + +#include <net/tls.h> +#include "fpga/core.h" + +struct mlx5_fpga_tls { + struct list_head pending_cmds; + spinlock_t pending_cmds_lock; /* Protects pending_cmds */ + u32 caps; + struct mlx5_fpga_conn *conn; + + struct idr tx_idr; + spinlock_t idr_spinlock; /* protects the IDR */ +}; + +int mlx5_fpga_tls_add_tx_flow(struct mlx5_core_dev *mdev, void *flow, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn, u32 *p_swid); + +void mlx5_fpga_tls_del_tx_flow(struct mlx5_core_dev *mdev, u32 swid, + gfp_t flags); + +bool mlx5_fpga_is_tls_device(struct mlx5_core_dev *mdev); +int mlx5_fpga_tls_init(struct mlx5_core_dev *mdev); +void mlx5_fpga_tls_cleanup(struct mlx5_core_dev *mdev); + +static inline u32 mlx5_fpga_tls_device_caps(struct mlx5_core_dev *mdev) +{ + return mdev->fpga->tls->caps; +} + +#endif /* __MLX5_FPGA_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ef5afd7c9325..5a00deff5457 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -372,6 +372,15 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { id = dst->dest_attr.ft->id; + } else if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_VPORT) { + id = dst->dest_attr.vport.num; + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id_valid, + dst->dest_attr.vport.vhca_id_valid); + MLX5_SET(dest_format_struct, in_dests, + destination_eswitch_owner_vhca_id, + dst->dest_attr.vport.vhca_id); } else { id = dst->dest_attr.tir_num; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 56e275199256..e1b609c61d59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1373,6 +1373,8 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, struct mlx5_core_dev *dev = get_dev(&ft->node); int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); void *match_criteria_addr; + u8 src_esw_owner_mask_on; + void *misc; int err; u32 *in; @@ -1385,6 +1387,14 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft, MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index); MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index + fg->max_ftes - 1); + + misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria, + misc_parameters); + src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + MLX5_SET(create_flow_group_in, in, + source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on); + match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); memcpy(match_criteria_addr, fg->mask.match_criteria, @@ -1405,7 +1415,7 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, { if (d1->type == d2->type) { if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && - d1->vport_num == d2->vport_num) || + d1->vport.num == d2->vport.num) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && d1->ft == d2->ft) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && @@ -2484,7 +2494,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering) if (!steering->fdb_root_ns) return -ENOMEM; - prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1); + prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2); if (IS_ERR(prio)) goto out_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 6d9053bcbe95..08eac92fc26c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -33,6 +33,8 @@ #ifndef __MLX5E_IPOB_H__ #define __MLX5E_IPOB_H__ +#ifdef CONFIG_MLX5_CORE_IPOIB + #include <linux/mlx5/fs.h> #include "en.h" @@ -93,8 +95,32 @@ const struct mlx5e_profile *mlx5i_pkey_get_profile(void); /* Extract mlx5e_priv from IPoIB netdev */ #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) +struct mlx5_wqe_eth_pad { + u8 rsvd0[16]; +}; + +struct mlx5i_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_datagram_seg datagram; + struct mlx5_wqe_eth_pad pad; + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; +}; + +static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, + struct mlx5i_tx_wqe **wqe, + u16 *pi) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + + *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); + memset(*wqe, 0, sizeof(**wqe)); +} + netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +#endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e2c465b0b3f8..615005e63819 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -60,6 +60,7 @@ #include "fpga/core.h" #include 
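In the fs_core.c hunk above, create_auto_flow_group() derives a command bit from the group's mask: if source_eswitch_owner_vhca_id is matched at all (any nonzero mask bits), the corresponding valid bit must be set in the create command. The !! idiom normalizes an arbitrary nonzero value to exactly 1, which matters when the target is a single-bit field. A tiny illustration (the field layout is invented for the example):

#include <stdio.h>
#include <string.h>

struct match_criteria {
	unsigned short source_vhca_id_mask;	/* invented layout */
	unsigned short other_mask;
};

int main(void)
{
	struct match_criteria mc;
	unsigned char valid;

	memset(&mc, 0, sizeof(mc));
	mc.source_vhca_id_mask = 0x0fff;	/* caller matches on the field */

	/* !! collapses 0x0fff to 1; a plain cast to u8 would yield 0xff. */
	valid = !!mc.source_vhca_id_mask;
	printf("src_esw_owner_mask_on = %u\n", valid);
	return 0;
}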
"fpga/ipsec.h" #include "accel/ipsec.h" +#include "accel/tls.h" #include "lib/clock.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); @@ -1190,6 +1191,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_ipsec_start; } + err = mlx5_accel_tls_init(dev); + if (err) { + dev_err(&pdev->dev, "TLS device start failed %d\n", err); + goto err_tls_start; + } + err = mlx5_init_fs(dev); if (err) { dev_err(&pdev->dev, "Failed to init flow steering\n"); @@ -1231,6 +1238,9 @@ err_sriov: mlx5_cleanup_fs(dev); err_fs: + mlx5_accel_tls_cleanup(dev); + +err_tls_start: mlx5_accel_ipsec_cleanup(dev); err_ipsec_start: @@ -1306,6 +1316,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); + mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index b9736f505bdf..f4f02f775c93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -123,8 +123,8 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key)); write_unlock_irqrestore(&table->lock, flags); if (!deleted_mkey) { - mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n", - mlx5_base_mkey(mkey->key)); + mlx5_core_dbg(dev, "failed radix tree delete of mkey 0x%x\n", + mlx5_base_mkey(mkey->key)); return -ENOENT; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 02d6c5b5d502..4ca07bfb6b14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -407,21 +407,21 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn, case MLX5_CMD_OP_RST2INIT_QP: if (MBOX_ALLOC(mbox, rst2init_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_INIT2RTR_QP: if (MBOX_ALLOC(mbox, init2rtr_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_RTR2RTS_QP: if (MBOX_ALLOC(mbox, rtr2rts_qp)) return -ENOMEM; - MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, - opt_param_mask, qpc); - break; + MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn, + opt_param_mask, qpc); + break; case MLX5_CMD_OP_RTS2RTS_QP: if (MBOX_ALLOC(mbox, rts2rts_qp)) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 177e076b8d17..719cecb182c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -511,7 +511,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.system_image_guid); - kfree(out); + kvfree(out); return 0; } @@ -531,7 +531,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) *node_guid = MLX5_GET64(query_nic_vport_context_out, out, nic_vport_context.node_guid); - kfree(out); + kvfree(out); return 0; } @@ -587,7 +587,7 @@ int 
mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, nic_vport_context.qkey_violation_counter); - kfree(out); + kvfree(out); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index ea66448ba365..b97bb72b4db4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -36,7 +36,12 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) { - return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; +} + +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) +{ + return (u32)wq->fbc.frag_sz_m1 + 1; } u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) @@ -46,12 +51,12 @@ u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) { - return (u32)wq->sz_m1 + 1; + return (u32)wq->fbc.sz_m1 + 1; } static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) { - return mlx5_wq_cyc_get_size(wq) << wq->log_stride; + return mlx5_wq_cyc_get_size(wq) << wq->fbc.log_stride; } static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq) @@ -67,17 +72,20 @@ static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) { - return mlx5_wq_ll_get_size(wq) << wq->log_stride; + return mlx5_wq_ll_get_size(wq) << wq->fbc.log_stride; } int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; int err; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); + wq->sz = wq->fbc.sz_m1 + 1; err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -85,14 +93,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + fbc->frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -105,17 +113,35 @@ err_db_free: return err; } +static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf, + struct mlx5_wq_qp *qp) +{ + struct mlx5_frag_buf *rqb, *sqb; + + rqb = &qp->rq.fbc.frag_buf; + *rqb = *buf; + rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq); + rqb->npages = 1 << get_order(rqb->size); + + sqb = &qp->sq.fbc.frag_buf; + *sqb = *buf; + sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq); + sqb->npages = 1 << get_order(sqb->size); + sqb->frags += rqb->npages; /* first part is for the rq */ +} + int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { int err; - wq->rq.log_stride = MLX5_GET(qpc, qpc, log_rq_stride) + 4; - wq->rq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_rq_size)) - 1; - - wq->sq.log_stride = ilog2(MLX5_SEND_WQE_BB); - wq->sq.sz_m1 = (1 << MLX5_GET(qpc, qpc, log_sq_size)) - 1; + mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4, + MLX5_GET(qpc, qpc, log_rq_size), + &wq->rq.fbc); +
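The rewrite above funnels every work-queue flavor through struct mlx5_frag_buf_ctrl, which records the geometry once (log2 stride, log2 entry count) and derives power-of-two masks for cheap modular indexing. The arithmetic in isolation (a simplified fbc; the real one also tracks per-fragment masks such as frag_sz_m1):

#include <stdio.h>

struct fbc {
	unsigned char log_stride;	/* log2 of bytes per entry  */
	unsigned char log_sz;		/* log2 of entries per ring */
	unsigned short sz_m1;		/* entries - 1, used as mask */
};

static void fill_fbc(struct fbc *f, unsigned char log_stride,
		     unsigned char log_sz)
{
	f->log_stride = log_stride;
	f->log_sz = log_sz;
	f->sz_m1 = (1u << log_sz) - 1;
}

/* ctr is a free-running counter; the mask wraps it into the ring. */
static unsigned int ctr2ix(const struct fbc *f, unsigned short ctr)
{
	return ctr & f->sz_m1;
}

static unsigned int wqe_offset(const struct fbc *f, unsigned int ix)
{
	return ix << f->log_stride;	/* byte offset of entry ix */
}

int main(void)
{
	struct fbc f;

	fill_fbc(&f, 6, 4);		/* 64-byte stride, 16 entries */
	printf("ix(17)=%u offset=%u\n",
	       ctr2ix(&f, 17), wqe_offset(&f, ctr2ix(&f, 17)));
	return 0;
}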
mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB), + MLX5_GET(qpc, qpc, log_sq_size), + &wq->sq.fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -123,15 +149,15 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_qp_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->rq.buf = wq_ctrl->buf.frags->buf; - wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq); + mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq); + wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; @@ -147,7 +173,7 @@ err_db_free: int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl) + struct mlx5_wq_ctrl *wq_ctrl) { int err; @@ -160,7 +186,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, } err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq), - &wq_ctrl->frag_buf, + &wq_ctrl->buf, param->buf_numa_node); if (err) { mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", @@ -168,7 +194,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, goto err_db_free; } - wq->fbc.frag_buf = wq_ctrl->frag_buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; wq_ctrl->mdev = mdev; @@ -185,12 +211,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl) { + struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; struct mlx5_wqe_srq_next_seg *next_seg; int err; int i; - wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride); - wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1; + mlx5_fill_fbc(MLX5_GET(wq, wqc, log_wq_stride), + MLX5_GET(wq, wqc, log_wq_sz), + fbc); err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); if (err) { @@ -198,17 +226,17 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, return err; } - err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), - &wq_ctrl->buf, param->buf_numa_node); + err = mlx5_frag_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq), + &wq_ctrl->buf, param->buf_numa_node); if (err) { - mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err); + mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n", err); goto err_db_free; } - wq->buf = wq_ctrl->buf.frags->buf; + wq->fbc.frag_buf = wq_ctrl->buf; wq->db = wq_ctrl->db.db; - for (i = 0; i < wq->sz_m1; i++) { + for (i = 0; i < fbc->sz_m1; i++) { next_seg = mlx5_wq_ll_get_wqe(wq, i); next_seg->next_wqe_index = cpu_to_be16(i + 1); } @@ -227,12 +255,7 @@ err_db_free: void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl) { - mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); + mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf); mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); } -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl) -{ - mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf); - mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index fca90b94596d..0b47126815b6 100644 --- 
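Each QP owns one fragmented buffer that both of its rings share: the RQ occupies the first rqb->npages fragments and the SQ starts immediately after (sqb->frags += rqb->npages), each slice sized from its own ring's geometry. A standalone model of the carve-up, with plain malloc standing in for mlx5_frag_buf_alloc_node() and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct slice { unsigned char *base; size_t size; };

static void qp_split(unsigned char *buf, size_t rq_bytes, size_t sq_bytes,
		     struct slice *rq, struct slice *sq)
{
	rq->base = buf;			/* first part is for the rq */
	rq->size = rq_bytes;
	sq->base = buf + rq_bytes;	/* sq starts after the rq   */
	sq->size = sq_bytes;
}

int main(void)
{
	size_t rq_bytes = 4096, sq_bytes = 8192;
	unsigned char *buf = malloc(rq_bytes + sq_bytes);
	struct slice rq, sq;

	if (!buf)
		return 1;
	qp_split(buf, rq_bytes, sq_bytes, &rq, &sq);
	printf("rq@%p +%zu, sq@%p +%zu\n",
	       (void *)rq.base, rq.size, (void *)sq.base, sq.size);
	free(buf);
	return 0;
}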
a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -38,7 +38,6 @@ #include <linux/mlx5/qp.h> struct mlx5_wq_param { - int linear; int buf_numa_node; int db_numa_node; }; @@ -49,17 +48,12 @@ struct mlx5_wq_ctrl { struct mlx5_db db; }; -struct mlx5_frag_wq_ctrl { - struct mlx5_core_dev *mdev; - struct mlx5_frag_buf frag_buf; - struct mlx5_db db; -}; - struct mlx5_wq_cyc { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; - u16 sz_m1; - u8 log_stride; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; }; struct mlx5_wq_qp { @@ -74,20 +68,19 @@ struct mlx5_cqwq { }; struct mlx5_wq_ll { - void *buf; + struct mlx5_frag_buf_ctrl fbc; __be32 *db; __be16 *tail_next; - u16 sz_m1; u16 head; u16 wqe_ctr; u16 cur_sz; - u8 log_stride; }; int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); +u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, @@ -95,7 +88,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, - struct mlx5_frag_wq_ctrl *wq_ctrl); + struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, @@ -104,16 +97,67 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); -void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl); + +static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) +{ + return wq->cur_sz == wq->sz; +} + +static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) +{ + *wq->db = cpu_to_be32(wq->wqe_ctr); +} static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) { - return ctr & wq->sz_m1; + return ctr & wq->fbc.sz_m1; +} + +static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.frag_sz_m1; +} + +static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq) +{ + return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); } static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) @@ -124,9 +168,14 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) return !equal && !smaller; } +static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) { - return wq->cc & wq->fbc.sz_m1; + 
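The new mlx5_wq_cyc helpers below replace explicit head/tail pointers with two counters: wqe_ctr free-runs on every post and cur_sz tracks occupancy, so head and tail are recovered by masking, and u16 wraparound needs no special casing because the ring size is a power of two. A runnable model of the same bookkeeping:

#include <stdio.h>
#include <stdint.h>

struct wq {
	uint16_t sz;		/* power of two             */
	uint16_t wqe_ctr;	/* free-running post count  */
	uint16_t cur_sz;	/* entries in flight        */
};

static int  wq_is_full(const struct wq *q) { return q->cur_sz == q->sz; }
static void wq_push(struct wq *q)          { q->wqe_ctr++; q->cur_sz++; }
static void wq_pop(struct wq *q)           { q->cur_sz--; }

static unsigned int wq_head(const struct wq *q)
{
	return q->wqe_ctr & (q->sz - 1);
}

static unsigned int wq_tail(const struct wq *q)
{
	/* u16 subtraction wraps correctly across wqe_ctr overflow */
	return (uint16_t)(q->wqe_ctr - q->cur_sz) & (q->sz - 1);
}

int main(void)
{
	struct wq q = { .sz = 8, .wqe_ctr = 0xfffe };	/* near u16 wrap */

	wq_push(&q); wq_push(&q); wq_push(&q);		/* crosses 0xffff */
	printf("head=%u tail=%u full=%d\n",
	       wq_head(&q), wq_tail(&q), wq_is_full(&q));
	wq_pop(&q);
	return 0;
}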
return mlx5_cqwq_ctr2ix(wq, wq->cc); } static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) @@ -134,9 +183,14 @@ static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } +static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) { - return wq->cc >> wq->fbc.log_sz; + return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc); } static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) @@ -167,7 +221,7 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) { - return wq->cur_sz == wq->sz_m1; + return wq->cur_sz == wq->fbc.sz_m1; } static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) @@ -177,7 +231,7 @@ static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) { - return wq->buf + (ix << wq->log_stride); + return mlx5_frag_buf_get_wqe(&wq->fbc, ix); } static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 479511cf79bc..2bc48054b685 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -58,7 +58,7 @@ static inline void mlxsw_cmd_mbox_zero(char *mbox) struct mlxsw_core; int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size); @@ -67,7 +67,7 @@ static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode, size_t in_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - in_mbox, in_mbox_size, NULL, 0); + false, in_mbox, in_mbox_size, NULL, 0); } static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, @@ -76,7 +76,7 @@ static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode, char *out_mbox, size_t out_mbox_size) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, - out_mbox_direct, NULL, 0, + out_mbox_direct, false, NULL, 0, out_mbox, out_mbox_size); } @@ -84,7 +84,7 @@ static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod) { return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false, - NULL, 0, NULL, 0); + false, NULL, 0, NULL, 0); } enum mlxsw_cmd_opcode { @@ -179,6 +179,8 @@ enum mlxsw_cmd_status { MLXSW_CMD_STATUS_BAD_INDEX = 0x0A, /* NVMEM checksum/CRC failed. */ MLXSW_CMD_STATUS_BAD_NVMEM = 0x0B, + /* Device is currently running reset */ + MLXSW_CMD_STATUS_RUNNING_RESET = 0x26, /* Bad management packet (silently discarded). */ MLXSW_CMD_STATUS_BAD_PKT = 0x30, }; @@ -208,6 +210,8 @@ static inline const char *mlxsw_cmd_status_str(u8 status) return "BAD_INDEX"; case MLXSW_CMD_STATUS_BAD_NVMEM: return "BAD_NVMEM"; + case MLXSW_CMD_STATUS_RUNNING_RESET: + return "RUNNING_RESET"; case MLXSW_CMD_STATUS_BAD_PKT: return "BAD_PKT"; default: @@ -424,10 +428,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8); MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8); /* cmd_mbox_query_aq_cap_log_max_cq_sz - * Log (base 2) of max CQEs allowed on CQ. + * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1. 
*/ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8); +/* cmd_mbox_query_aq_cap_log_max_cqv2_sz + * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2. + */ +MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8); + /* cmd_mbox_query_aq_cap_max_num_cqs * Maximum number of CQs. */ @@ -662,6 +671,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); +/* cmd_mbox_config_set_cqe_version + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -841,6 +856,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type, MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties, 0x60, 0, 8, 0x08, 0x00, false); +/* cmd_mbox_config_profile_cqe_version + * CQE version: + * 0: CQE version is 0 + * 1: CQE version is either 1 or 2 + * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8); + /* ACCESS_REG - Access EMAD Supported Register * ---------------------------------- * OpMod == 0 (N/A), INMmod == 0 (N/A) @@ -850,10 +873,12 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties, */ static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core, + bool reset_ok, char *in_mbox, char *out_mbox) { return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG, - 0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE, + 0, 0, false, reset_ok, + in_mbox, MLXSW_CMD_MBOX_SIZE, out_mbox, MLXSW_CMD_MBOX_SIZE); } @@ -1032,11 +1057,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core, 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE); } -/* cmd_mbox_sw2hw_cq_cv +enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver { + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2, +}; + +/* cmd_mbox_sw2hw_cq_cqe_ver * CQE Version. - * 0 - CQE Version 0, 1 - CQE Version 1 */ -MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4); +MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4); /* cmd_mbox_sw2hw_cq_c_eqn * Event Queue this CQ reports completion events to. 
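Seen together, these mailbox fields form a small negotiation: the config profile's cqe_version knob selects CQE v0 versus "v1 or v2", and in the latter case each CQ picks its exact version through the per-queue cqe_ver field at sw2hw time. A sketch of the driver-side decision, including the rule (applied later in pci.c) that send-queue CQs are capped at v1; the enum values are illustrative:

#include <stdio.h>

enum cqe_v { CQE_V0, CQE_V1, CQE_V2 };

/* Profile-level knob: 0 = v0, 1 = v1-or-v2 (per-CQ cqe_ver decides). */
static int profile_cqe_version(enum cqe_v max)
{
	return max > CQE_V0;
}

/* SDQs gain nothing from the bigger v2 descriptor, so cap them at v1. */
static enum cqe_v cq_version(enum cqe_v max, int is_sdq_cq)
{
	if (max == CQE_V2 && is_sdq_cq)
		return CQE_V1;
	return max;
}

int main(void)
{
	enum cqe_v max = CQE_V2;

	printf("profile=%d sdq_cq=v%d rdq_cq=v%d\n",
	       profile_cqe_version(max),
	       cq_version(max, 1), cq_version(max, 0));
	return 0;
}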
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e13ac3b8dff7..f9c724752a32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -770,27 +770,35 @@ static void mlxsw_core_driver_put(const char *kind) static int mlxsw_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; - return mlxsw_core->driver->port_split(mlxsw_core, port_index, count); + return mlxsw_core->driver->port_split(mlxsw_core, port_index, count, + extack); } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, - unsigned int port_index) + unsigned int port_index, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - if (port_index >= mlxsw_core->max_ports) + if (port_index >= mlxsw_core->max_ports) { + NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports"); return -EINVAL; + } if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; - return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); + return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index, + extack); } static int @@ -963,17 +971,16 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, pool_type, p_cur, p_max); } -static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink) +static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - const struct mlxsw_bus *mlxsw_bus = mlxsw_core->bus; int err; - if (!mlxsw_bus->reset) + if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET)) return -EOPNOTSUPP; mlxsw_core_bus_device_unregister(mlxsw_core, true); - mlxsw_bus->reset(mlxsw_core->bus_priv); err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, mlxsw_core->bus, mlxsw_core->bus_priv, true, @@ -1480,6 +1487,7 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, { enum mlxsw_emad_op_tlv_status status; int err, n_retry; + bool reset_ok; char *in_mbox, *out_mbox, *tmp; dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", @@ -1501,9 +1509,16 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); mlxsw_emad_pack_reg_tlv(tmp, reg, payload); + /* There is a special treatment needed for MRSR (reset) register. + * The command interface will return error after the command + * is executed, so tell the lower layer to expect it + * and cope accordingly. 
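The reset_ok plumbing that the comment describes turns an expected failure into success: writing MRSR makes the firmware start resetting, so the command interface reports -EIO with status RUNNING_RESET, and only for that register is the pair treated as "reset in progress, all good". A minimal model of the pattern (constants are illustrative; RUNNING_RESET mirrors the 0x26 status above):

#include <stdio.h>

enum { STATUS_OK = 0, STATUS_RUNNING_RESET = 0x26 };

/* Model: a command either succeeds or reports a status via *status. */
static int cmd_exec(int triggers_reset, int *status)
{
	*status = triggers_reset ? STATUS_RUNNING_RESET : STATUS_OK;
	return triggers_reset ? -5 /* -EIO */ : 0;
}

/* reset_ok tells the caller that RUNNING_RESET is the expected outcome. */
static int reg_write(int is_mrsr, int reset_ok)
{
	int status;
	int err = cmd_exec(is_mrsr, &status);

	if (reset_ok && err == -5 && status == STATUS_RUNNING_RESET)
		return 0;	/* device is resetting, as requested */
	return err;
}

int main(void)
{
	printf("normal reg: %d\n", reg_write(0, 0));
	printf("MRSR write: %d\n", reg_write(1, 1));
	return 0;
}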
+ */ + reset_ok = reg->id == MLXSW_REG_MRSR_ID; + n_retry = 0; retry: - err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); + err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox); if (!err) { err = mlxsw_emad_process_status(out_mbox, &status); if (err) { @@ -1714,15 +1729,16 @@ EXPORT_SYMBOL(mlxsw_core_port_fini); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev, - bool split, u32 split_group) + u32 port_number, bool split, + u32 split_port_subnumber) { struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; mlxsw_core_port->port_driver_priv = port_driver_priv; - if (split) - devlink_port_split_set(devlink_port, split_group); + devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + port_number, split, split_port_subnumber); devlink_port_type_eth_set(devlink_port, dev); } EXPORT_SYMBOL(mlxsw_core_port_eth_set); @@ -1762,6 +1778,17 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_port_type_get); +int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, + u8 local_port, char *name, size_t len) +{ + struct mlxsw_core_port *mlxsw_core_port = + &mlxsw_core->ports[local_port]; + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + return devlink_port_get_phys_port_name(devlink_port, name, len); +} +EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name); + static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { @@ -1781,7 +1808,7 @@ static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, } int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, - u32 in_mod, bool out_mbox_direct, + u32 in_mod, bool out_mbox_direct, bool reset_ok, char *in_mbox, size_t in_mbox_size, char *out_mbox, size_t out_mbox_size) { @@ -1804,7 +1831,15 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mbox, in_mbox_size, out_mbox, out_mbox_size, &status); - if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { + if (!err && out_mbox) { + dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); + mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); + } + + if (reset_ok && err == -EIO && + status == MLXSW_CMD_STATUS_RUNNING_RESET) { + err = 0; + } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) { dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n", opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod, status, mlxsw_cmd_status_str(status)); @@ -1814,10 +1849,6 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, in_mod); } - if (!err && out_mbox) { - dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n"); - mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size); - } return err; } EXPORT_SYMBOL(mlxsw_cmd_exec); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 092d39399f3c..552cfa29c2f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -201,13 +201,16 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv, struct net_device *dev, - bool 
split, u32 split_group); + u32 port_number, bool split, + u32 split_port_subnumber); void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port, void *port_driver_priv); enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core, u8 local_port); +int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, + u8 local_port, char *name, size_t len); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); @@ -271,8 +274,9 @@ struct mlxsw_driver { int (*port_type_set)(struct mlxsw_core *mlxsw_core, u8 local_port, enum devlink_port_type new_type); int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count); - int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); + unsigned int count, struct netlink_ext_ack *extack); + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack); int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); @@ -334,6 +338,7 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, mlxsw_core_res_get(mlxsw_core, MLXSW_RES_ID_##short_res_id) #define MLXSW_BUS_F_TXRX BIT(0) +#define MLXSW_BUS_F_RESET BIT(1) struct mlxsw_bus { const char *kind; @@ -341,7 +346,6 @@ struct mlxsw_bus { const struct mlxsw_config_profile *profile, struct mlxsw_res *res); void (*fini)(void *bus_priv); - void (*reset)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); int (*skb_transmit)(void *bus_priv, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 3a9381977d6d..fc4557245ff4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -117,6 +117,7 @@ struct mlxsw_pci_queue { struct { u32 comp_sdq_count; u32 comp_rdq_count; + enum mlxsw_pci_cqe_v v; } cq; struct { u32 ev_cmd_count; @@ -155,6 +156,8 @@ struct mlxsw_pci { } cmd; struct mlxsw_bus_info bus_info; const struct pci_device_id *id; + enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */ + u8 num_sdq_cqs; /* Number of CQs used for SDQs */ }; static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) @@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit) return owner_bit != !!(q->consumer_counter & q->count); } -static char * -mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q, - u32 (*get_elem_owner_func)(const char *)) -{ - struct mlxsw_pci_queue_elem_info *elem_info; - char *elem; - bool owner_bit; - - elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - elem = elem_info->elem; - owner_bit = get_elem_owner_func(elem); - if (mlxsw_pci_elem_hw_owned(q, owner_bit)) - return NULL; - q->consumer_counter++; - rmb(); /* make sure we read owned bit before the rest of elem */ - return elem; -} - static struct mlxsw_pci_queue_type_group * mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type) @@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, } } +static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + q->u.cq.v = mlxsw_pci->max_cqe_ver; + + /* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */ + if (q->u.cq.v == 
MLXSW_PCI_CQE_V2 && + q->num < mlxsw_pci->num_sdq_cqs) + q->u.cq.v = MLXSW_PCI_CQE_V1; +} + static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { char *elem = mlxsw_pci_queue_elem_get(q, i); - mlxsw_pci_cqe_owner_set(elem, 1); + mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); } - mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */ + if (q->u.cq.v == MLXSW_PCI_CQE_V1) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); + else if (q->u.cq.v == MLXSW_PCI_CQE_V2) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); @@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, - char *cqe) + enum mlxsw_pci_cqe_v cqe_v, char *cqe) { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; @@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - if (mlxsw_pci_cqe_lag_get(cqe)) { + if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) { rx_info.is_lag = true; - rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe); - rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe); + rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe); + rx_info.lag_port_index = + mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe); } else { rx_info.is_lag = false; rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe); @@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe); byte_count = mlxsw_pci_cqe_byte_count_get(cqe); - if (mlxsw_pci_cqe_crc_get(cqe)) + if (mlxsw_pci_cqe_crc_get(cqe_v, cqe)) byte_count -= ETH_FCS_LEN; skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); @@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) { - return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get); + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem); + if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; } static void mlxsw_pci_cq_tasklet(unsigned long data) @@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); - u8 sendq = mlxsw_pci_cqe_sr_get(cqe); - u8 dqn = mlxsw_pci_cqe_dqn_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); if (sendq) { struct mlxsw_pci_queue *sdq; @@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, cqe); + wqe_counter, 
q->u.cq.v, cqe); q->u.cq.comp_rdq_count++; } if (++items == credits) @@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) } } +static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : + MLXSW_PCI_CQE01_COUNT; +} + +static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE : + MLXSW_PCI_CQE01_SIZE; +} + static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { @@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) { - return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get); + struct mlxsw_pci_queue_elem_info *elem_info; + char *elem; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + elem = elem_info->elem; + owner_bit = mlxsw_pci_eqe_owner_get(elem); + if (mlxsw_pci_elem_hw_owned(q, owner_bit)) + return NULL; + q->consumer_counter++; + rmb(); /* make sure we read owned bit before the rest of elem */ + return elem; } static void mlxsw_pci_eq_tasklet(unsigned long data) @@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) struct mlxsw_pci_queue_ops { const char *name; enum mlxsw_pci_queue_type type; + void (*pre_init)(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q); int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q); void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); void (*tasklet)(unsigned long data); + u16 (*elem_count_f)(const struct mlxsw_pci_queue *q); + u8 (*elem_size_f)(const struct mlxsw_pci_queue *q); u16 elem_count; u8 elem_size; }; @@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = { static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_CQ, + .pre_init = mlxsw_pci_cq_pre_init, .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, .tasklet = mlxsw_pci_cq_tasklet, - .elem_count = MLXSW_PCI_CQE_COUNT, - .elem_size = MLXSW_PCI_CQE_SIZE + .elem_count_f = mlxsw_pci_cq_elem_count, + .elem_size_f = mlxsw_pci_cq_elem_size }; static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { @@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, int i; int err; - spin_lock_init(&q->lock); q->num = q_num; - q->count = q_ops->elem_count; - q->elem_size = q_ops->elem_size; + if (q_ops->pre_init) + q_ops->pre_init(mlxsw_pci, q); + + spin_lock_init(&q->lock); + q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) : + q_ops->elem_count; + q->elem_size = q_ops->elem_size_f ? 
q_ops->elem_size_f(q) : + q_ops->elem_size; q->type = q_ops->type; q->pci = mlxsw_pci; @@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, elem_info = mlxsw_pci_queue_elem_info_get(q, i); elem_info->elem = - __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i); + __mlxsw_pci_queue_elem_get(q, q->elem_size, i); } mlxsw_cmd_mbox_zero(mbox); @@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) u8 rdq_log2sz; u8 num_cqs; u8 cq_log2sz; + u8 cqv2_log2sz; u8 num_eqs; u8 eq_log2sz; int err; @@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox); num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox); cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox); + cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox); num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); @@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) || (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) || - (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) || + (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) || + (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 && + (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) || (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) { dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n"); return -EINVAL; } + mlxsw_pci->num_sdq_cqs = num_sdqs; + err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, num_eqs); if (err) { @@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, &profile->swid_config[i]); + if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) { + mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1); + mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1); + } + return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox); } @@ -1313,6 +1371,51 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, mbox->mapaddr); } +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, + const struct pci_device_id *id) +{ + unsigned long end; + char mrsr_pl[MLXSW_REG_MRSR_LEN]; + int err; + + mlxsw_reg_mrsr_pack(mrsr_pl); + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); + if (err) + return err; + if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { + msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + return 0; + } + + /* We must wait for the HW to become responsive once again. 
*/ + msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); + + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + do { + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); + + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) + break; + cond_resched(); + } while (time_before(jiffies, end)); + return 0; +} + +static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); + if (err < 0) + dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); + return err; +} + +static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) +{ + pci_free_irq_vectors(mlxsw_pci->pdev); +} + static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, struct mlxsw_res *res) @@ -1340,6 +1443,16 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_out_mbox_alloc; + err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); + if (err) + goto err_sw_reset; + + err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); + if (err < 0) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_alloc_irq; + } + err = mlxsw_cmd_query_fw(mlxsw_core, mbox); if (err) goto err_query_fw; @@ -1378,6 +1491,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_query_resources; + if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2)) + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2; + else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1)) + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1; + else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) && + MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) || + !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) { + mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0; + } else { + dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n"); + goto err_cqe_v_check; + } + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res); if (err) goto err_config_profile; @@ -1400,6 +1528,7 @@ err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: err_config_profile: +err_cqe_v_check: err_query_resources: err_boardinfo: mlxsw_pci_fw_area_fini(mlxsw_pci); @@ -1407,6 +1536,9 @@ err_fw_area_init: err_doorbell_page_bar: err_iface_rev: err_query_fw: + mlxsw_pci_free_irq_vectors(mlxsw_pci); +err_alloc_irq: +err_sw_reset: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); err_out_mbox_alloc: mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); @@ -1422,6 +1554,7 @@ static void mlxsw_pci_fini(void *bus_priv) free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); + mlxsw_pci_free_irq_vectors(mlxsw_pci); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox); mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); } @@ -1603,58 +1736,6 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, return err; } -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci, - const struct pci_device_id *id) -{ - unsigned long end; - - mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); - if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) { - msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - return 0; - } - - /* Reset needs to be written before we read control register, and - * we must wait for the HW to become responsive once again - */ - wmb(); - msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS); - - end = jiffies + 
msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); - do { - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); - - if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) - break; - cond_resched(); - } while (time_before(jiffies, end)); - return 0; -} - -static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - pci_free_irq_vectors(mlxsw_pci->pdev); -} - -static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci) -{ - int err; - - err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX); - if (err < 0) - dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n"); - return err; -} - -static void mlxsw_pci_reset(void *bus_priv) -{ - struct mlxsw_pci *mlxsw_pci = bus_priv; - - mlxsw_pci_free_irq_vectors(mlxsw_pci); - mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id); - mlxsw_pci_alloc_irq_vectors(mlxsw_pci); -} - static const struct mlxsw_bus mlxsw_pci_bus = { .kind = "pci", .init = mlxsw_pci_init, @@ -1662,8 +1743,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = { .skb_transmit_busy = mlxsw_pci_skb_transmit_busy, .skb_transmit = mlxsw_pci_skb_transmit, .cmd_exec = mlxsw_pci_cmd_exec, - .features = MLXSW_BUS_F_TXRX, - .reset = mlxsw_pci_reset, + .features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET, }; static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1721,18 +1801,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->pdev = pdev; pci_set_drvdata(pdev, mlxsw_pci); - err = mlxsw_pci_sw_reset(mlxsw_pci, id); - if (err) { - dev_err(&pdev->dev, "Software reset failed\n"); - goto err_sw_reset; - } - - err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci); - if (err < 0) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - mlxsw_pci->bus_info.device_kind = driver_name; mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev); mlxsw_pci->bus_info.dev = &pdev->dev; @@ -1749,9 +1817,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_bus_device_register: - mlxsw_pci_free_irq_vectors(mlxsw_pci); -err_msix_init: -err_sw_reset: iounmap(mlxsw_pci->hw_addr); err_ioremap: err_pci_resource_len_check: @@ -1769,7 +1834,6 @@ static void mlxsw_pci_remove(struct pci_dev *pdev) struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev); mlxsw_core_bus_device_unregister(mlxsw_pci->core, false); - mlxsw_pci_free_irq_vectors(mlxsw_pci); iounmap(mlxsw_pci->hw_addr); pci_release_regions(mlxsw_pci->pdev); pci_disable_device(mlxsw_pci->pdev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index fb082ad21b00..963155f6a17a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -82,10 +82,12 @@ #define MLXSW_PCI_AQ_PAGES 8 #define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES) #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ -#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ +#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) -#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE) +#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) +#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE) #define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE) #define 
MLXSW_PCI_EQE_UPDATE_COUNT 0x80 @@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false); */ MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false); +enum mlxsw_pci_cqe_v { + MLXSW_PCI_CQE_V0, + MLXSW_PCI_CQE_V1, + MLXSW_PCI_CQE_V2, +}; + +#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \ +static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \ +{ \ + switch (v) { \ + default: \ + case MLXSW_PCI_CQE_V0: \ + return mlxsw_pci_cqe##v0##_##name##_get(cqe); \ + case MLXSW_PCI_CQE_V1: \ + return mlxsw_pci_cqe##v1##_##name##_get(cqe); \ + case MLXSW_PCI_CQE_V2: \ + return mlxsw_pci_cqe##v2##_##name##_get(cqe); \ + } \ +} \ +static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \ + char *cqe, u32 val) \ +{ \ + switch (v) { \ + default: \ + case MLXSW_PCI_CQE_V0: \ + mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \ + break; \ + case MLXSW_PCI_CQE_V1: \ + mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \ + break; \ + case MLXSW_PCI_CQE_V2: \ + mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \ + break; \ + } \ +} + /* pci_cqe_lag * Packet arrives from a port which is a LAG */ -MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); +MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1); +MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1); +mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12); /* pci_cqe_system_port/lag_id * When lag=0: System port on which the packet was received @@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1); * bits [3:0] sub_port on which the packet was received */ MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16); -MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12); -MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4); +MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12); +MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16); +mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12); +MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4); +MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8); +mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12); /* pci_cqe_wqe_counter * WQE count of the WQEs completed on the associated dqn @@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9); * Length include CRC. Indicates the length field includes * the packet's CRC. */ -MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1); +MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1); +MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1); +mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12); /* pci_cqe_e * CQE with Error. */ -MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1); +MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1); +MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1); +mlxsw_pci_cqe_item_helpers(e, 0, 12, 12); /* pci_cqe_sr * 1 - Send Queue * 0 - Receive Queue */ -MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1); +MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1); +MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1); +mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12); /* pci_cqe_dqn * Descriptor Queue (DQ) Number. */ -MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5); +MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5); +MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6); +mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); /* pci_cqe_owner * Ownership bit. */ -MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1); +MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1); +MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1); +mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2); /* pci_eqe_event_type * Event type. 
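The mlxsw_pci_cqe_item_helpers() macro above stamps out one dispatching getter/setter per CQE field, forwarding to whichever v0/v1/v2 accessor matches the queue's negotiated version; v1 and v2 usually share a layout, hence the repeated "12" suffix. A trimmed, compilable rendition of the same token-pasting trick (the per-layout accessors are stand-ins for the MLXSW_ITEM32()-generated ones):

#include <stdio.h>

enum cqe_v { CQE_V0, CQE_V1, CQE_V2 };

/* Stand-ins for the generated per-layout accessors. */
static unsigned int cqe0_byte_count_get(const char *cqe)
{
	return (unsigned char)cqe[0];
}
static unsigned int cqe12_byte_count_get(const char *cqe)
{
	return (unsigned char)cqe[1];
}

#define cqe_item_helper(name, v0, v12)					\
static unsigned int cqe_##name##_get(enum cqe_v v, const char *cqe)	\
{									\
	switch (v) {							\
	default:							\
	case CQE_V0:							\
		return cqe##v0##_##name##_get(cqe);			\
	case CQE_V1:							\
	case CQE_V2:							\
		return cqe##v12##_##name##_get(cqe);			\
	}								\
}

cqe_item_helper(byte_count, 0, 12)

int main(void)
{
	char cqe[2] = { 10, 20 };

	printf("v0=%u v2=%u\n",
	       cqe_byte_count_get(CQE_V0, cqe),
	       cqe_byte_count_get(CQE_V2, cqe));
	return 0;
}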
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6218231e379e..1877d9f8a11a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -6833,6 +6833,12 @@ enum mlxsw_reg_mpat_span_type { */ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0, + /* Remote SPAN Ethernet VLAN. + * The packet is forwarded to the monitoring port on the monitoring + * VLAN. + */ + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH = 0x1, + /* Encapsulated Remote SPAN Ethernet L3 GRE. * The packet is encapsulated with GRE header. */ @@ -7028,6 +7034,30 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, mlxsw_reg_mpar_pa_id_set(payload, pa_id); } +/* MRSR - Management Reset and Shutdown Register + * --------------------------------------------- + * MRSR register is used to reset or shutdown the switch or + * the entire system (when applicable). + */ +#define MLXSW_REG_MRSR_ID 0x9023 +#define MLXSW_REG_MRSR_LEN 0x08 + +MLXSW_REG_DEFINE(mrsr, MLXSW_REG_MRSR_ID, MLXSW_REG_MRSR_LEN); + +/* reg_mrsr_command + * Reset/shutdown command + * 0 - do nothing + * 1 - software reset + * Access: WO + */ +MLXSW_ITEM32(reg, mrsr, command, 0x00, 0, 4); + +static inline void mlxsw_reg_mrsr_pack(char *payload) +{ + MLXSW_REG_ZERO(mrsr, payload); + mlxsw_reg_mrsr_command_set(payload, 1); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. @@ -7892,6 +7922,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), + MLXSW_REG(mrsr), MLXSW_REG(mlcr), MLXSW_REG(mpsc), MLXSW_REG(mcqi), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 087aad52c195..fd9299ccec72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -43,6 +43,9 @@ enum mlxsw_res_id { MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, MLXSW_RES_ID_MAX_TRAP_GROUPS, + MLXSW_RES_ID_CQE_V0, + MLXSW_RES_ID_CQE_V1, + MLXSW_RES_ID_CQE_V2, MLXSW_RES_ID_COUNTER_POOL_SIZE, MLXSW_RES_ID_MAX_SPAN, MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, @@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, + [MLXSW_RES_ID_CQE_V0] = 0x2210, + [MLXSW_RES_ID_CQE_V1] = 0x2211, + [MLXSW_RES_ID_CQE_V2] = 0x2212, [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, [MLXSW_RES_ID_MAX_SPAN] = 0x2420, [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index adc6ab2cf429..968b88af2ef5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -441,29 +441,29 @@ static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL); } -int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, - u8 state) +enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - enum mlxsw_reg_spms_state spms_state; - char *spms_pl; - int err; - switch (state) { case BR_STATE_FORWARDING: - spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; - break; + return MLXSW_REG_SPMS_STATE_FORWARDING; case BR_STATE_LEARNING: - spms_state = MLXSW_REG_SPMS_STATE_LEARNING; - break; + return 
MLXSW_REG_SPMS_STATE_LEARNING; case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: - spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; - break; + return MLXSW_REG_SPMS_STATE_DISCARDING; default: BUG(); } +} + +int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, + u8 state) +{ + enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spms_pl; + int err; spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); if (!spms_pl) @@ -1238,21 +1238,10 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - u8 module = mlxsw_sp_port->mapping.module; - u8 width = mlxsw_sp_port->mapping.width; - u8 lane = mlxsw_sp_port->mapping.lane; - int err; - - if (!mlxsw_sp_port->split) - err = snprintf(name, len, "p%d", module + 1); - else - err = snprintf(name, len, "p%ds%d", module + 1, - lane / width); - - if (err >= len) - return -EINVAL; - return 0; + return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core, + mlxsw_sp_port->local_port, + name, len); } static struct mlxsw_sp_port_mall_tc_entry * @@ -2927,8 +2916,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, - mlxsw_sp_port, dev, mlxsw_sp_port->split, - module); + mlxsw_sp_port, dev, module + 1, + mlxsw_sp_port->split, lane / width); mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); return 0; @@ -3103,7 +3092,8 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, - unsigned int count) + unsigned int count, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3115,6 +3105,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } @@ -3123,11 +3114,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (count != 2 && count != 4) { netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); + NL_SET_ERR_MSG_MOD(extack, "Port can only be split into 2 or 4 ports"); return -EINVAL; } if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); + NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); return -EINVAL; } @@ -3136,6 +3129,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, base_port = local_port; if (mlxsw_sp->ports[base_port + 1]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } else { @@ -3143,6 +3137,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, if (mlxsw_sp->ports[base_port + 1] || mlxsw_sp->ports[base_port + 3]) { netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); return -EINVAL; } } @@ -3164,7 +3159,8 @@ err_port_split_create: return err; } -static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) +static int 
mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -3176,11 +3172,13 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) if (!mlxsw_sp_port) { dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", local_port); + NL_SET_ERR_MSG_MOD(extack, "Port number does not exist"); return -EINVAL; } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); + netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); + NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } @@ -3666,6 +3664,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_lag_init; } + /* Initialize SPAN before router and switchdev, so that those components + * can call mlxsw_sp_span_respin(). + */ + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + err = mlxsw_sp_switchdev_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); @@ -3684,15 +3691,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_afa_init; } - err = mlxsw_sp_span_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); - goto err_span_init; - } - - /* Initialize router after SPAN is initialized, so that the FIB and - * neighbor event handlers can issue SPAN respin. - */ err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3739,14 +3737,14 @@ err_acl_init: err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: - mlxsw_sp_span_fini(mlxsw_sp); -err_span_init: mlxsw_sp_afa_fini(mlxsw_sp); err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); err_counter_pool_init: mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: mlxsw_sp_lag_fini(mlxsw_sp); err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); @@ -3768,10 +3766,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_acl_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); - mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 804d4d2c8031..4a519d8edec8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -364,6 +364,7 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, u32 maxrate); +enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 stp_state); int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, u8 state); int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1904c0323d39..77b2adb29341 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -442,7 +442,7 @@ struct mlxsw_sp_fib6_entry { struct mlxsw_sp_rt6 { struct list_head list; - struct rt6_info *rt; + struct fib6_info *rt; }; struct mlxsw_sp_lpm_tree { @@ -2770,9 +2770,9 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, struct in6_addr *gw; int ifindex, weight; - ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex; - weight = mlxsw_sp_rt6->rt->rt6i_nh_weight; - gw = &mlxsw_sp_rt6->rt->rt6i_gateway; + ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex; + weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight; + gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw; if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex, weight)) return false; @@ -2838,7 +2838,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) struct net_device *dev; list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { - dev = mlxsw_sp_rt6->rt->dst.dev; + dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev; val ^= dev->ifindex; } @@ -3834,11 +3834,11 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; - if (nh->rif && nh->rif->dev == rt->dst.dev && + if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev && ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, - &rt->rt6i_gateway)) + &rt->fib6_nh.nh_gw)) return nh; continue; } @@ -3895,7 +3895,7 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, - list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; return; } @@ -3905,9 +3905,9 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); if (nh && nh->offloaded) - mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; + mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; else - mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -3920,9 +3920,9 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, common); list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; - rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; + rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -4699,29 +4699,29 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } -static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) +static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt) { /* Packets with link-local destination IP arriving to the router * are trapped to the CPU, so no need to program specific routes * for them. */ - if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL) + if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) return true; /* Multicast routes aren't supported, so ignore them. Neighbour * Discovery packets are specifically trapped. */ - if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) + if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST) return true; /* Cloned routes are irrelevant in the forwarding path. 
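The nexthop-group hash above folds the ifindex of every nexthop device into one value with XOR, so the aggregate is independent of the order in which the rt6 list is walked: two groups over the same set of devices fold to the same value. A minimal standalone sketch of that property (the arbitrary ifindex values and any finalization the driver applies afterwards are outside this sketch):

    #include <assert.h>
    #include <stdio.h>

    /* Order-independent XOR fold over nexthop ifindexes, modelling the
     * loop in mlxsw_sp_nexthop6_group_hash(). */
    static unsigned int ifindex_fold(const int *ifindex, int count)
    {
        unsigned int val = 0;
        int i;

        for (i = 0; i < count; i++)
            val ^= (unsigned int)ifindex[i];
        return val;
    }

    int main(void)
    {
        int a[] = { 3, 7, 12 };
        int b[] = { 12, 3, 7 };   /* same set, different order */

        assert(ifindex_fold(a, 3) == ifindex_fold(b, 3));
        printf("fold = %#x\n", ifindex_fold(a, 3));
        return 0;
    }
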
*/ - if (rt->rt6i_flags & RTF_CACHE) + if (rt->fib6_flags & RTF_CACHE) return true; return false; } -static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) +static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -4734,18 +4734,18 @@ static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) * memory. */ mlxsw_sp_rt6->rt = rt; - rt6_hold(rt); + fib6_info_hold(rt); return mlxsw_sp_rt6; } #if IS_ENABLED(CONFIG_IPV6) -static void mlxsw_sp_rt6_release(struct rt6_info *rt) +static void mlxsw_sp_rt6_release(struct fib6_info *rt) { - rt6_release(rt); + fib6_info_release(rt); } #else -static void mlxsw_sp_rt6_release(struct rt6_info *rt) +static void mlxsw_sp_rt6_release(struct fib6_info *rt) { } #endif @@ -4756,13 +4756,13 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) kfree(mlxsw_sp_rt6); } -static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt) +static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt) { /* RTF_CACHE routes are ignored */ - return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; + return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; } -static struct rt6_info * +static struct fib6_info * mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) { return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, @@ -4771,7 +4771,7 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt, bool replace) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; @@ -4779,21 +4779,21 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same * virtual router. 
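mlxsw_sp_rt6_create() takes a reference with fib6_info_hold() before caching the route pointer, and mlxsw_sp_rt6_release() drops it when the entry is destroyed, so the fib6_info cannot be freed while the driver still points at it. A standalone sketch of that hold-before-store, release-on-destroy discipline, using a toy refcount in place of the kernel's fib6_info machinery (struct route and struct cached_rt are hypothetical stand-ins):

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy refcounted route object, standing in for fib6_info. */
    struct route {
        int refcnt;
    };

    static void route_hold(struct route *rt) { rt->refcnt++; }

    static void route_release(struct route *rt)
    {
        if (--rt->refcnt == 0)
            free(rt);
    }

    /* Cached entry mirroring struct mlxsw_sp_rt6: hold before storing. */
    struct cached_rt {
        struct route *rt;
    };

    static struct cached_rt *cached_rt_create(struct route *rt)
    {
        struct cached_rt *c = malloc(sizeof(*c));

        if (!c)
            return NULL;
        c->rt = rt;
        route_hold(rt);   /* pin the route for the cache's lifetime */
        return c;
    }

    static void cached_rt_destroy(struct cached_rt *c)
    {
        route_release(c->rt);
        free(c);
    }

    int main(void)
    {
        struct route *rt = calloc(1, sizeof(*rt));
        struct cached_rt *c;

        route_hold(rt);            /* caller's own reference */
        c = cached_rt_create(rt);
        route_release(rt);         /* caller drops its reference... */
        assert(c->rt->refcnt == 1);   /* ...the cache still pins it */
        cached_rt_destroy(c);
        puts("ok");
        return 0;
    }
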
*/ - if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) continue; - if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (rt->rt6i_metric < nrt->rt6i_metric) + if (rt->fib6_metric < nrt->fib6_metric) continue; - if (rt->rt6i_metric == nrt->rt6i_metric && + if (rt->fib6_metric == nrt->fib6_metric && mlxsw_sp_fib6_rt_can_mp(rt)) return fib6_entry; - if (rt->rt6i_metric > nrt->rt6i_metric) + if (rt->fib6_metric > nrt->fib6_metric) break; } @@ -4802,7 +4802,7 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, static struct mlxsw_sp_rt6 * mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, - const struct rt6_info *rt) + const struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -4815,21 +4815,21 @@ mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, } static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt, + const struct fib6_info *rt, enum mlxsw_sp_ipip_type *ret) { - return rt->dst.dev && - mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->dst.dev, ret); + return rt->fib6_nh.nh_dev && + mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret); } static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh, - const struct rt6_info *rt) + const struct fib6_info *rt) { const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->fib6_nh.nh_dev; struct mlxsw_sp_rif *rif; int err; @@ -4870,13 +4870,13 @@ static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, struct mlxsw_sp_nexthop *nh, - const struct rt6_info *rt) + const struct fib6_info *rt) { - struct net_device *dev = rt->dst.dev; + struct net_device *dev = rt->fib6_nh.nh_dev; nh->nh_grp = nh_grp; - nh->nh_weight = rt->rt6i_nh_weight; - memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + nh->nh_weight = rt->fib6_nh.nh_weight; + memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr)); mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); @@ -4897,9 +4897,9 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, } static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt) + const struct fib6_info *rt) { - return rt->rt6i_flags & RTF_GATEWAY || + return rt->fib6_flags & RTF_GATEWAY || mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL); } @@ -4928,7 +4928,7 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt); nh_grp->count = fib6_entry->nrt6; for (i = 0; i < nh_grp->count; i++) { - struct rt6_info *rt = mlxsw_sp_rt6->rt; + struct fib6_info *rt = mlxsw_sp_rt6->rt; nh = &nh_grp->nexthops[i]; err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); @@ -5040,7 +5040,7 @@ err_nexthop6_group_get: static int mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; int err; @@ -5068,7 +5068,7 @@ err_nexthop6_group_update: static void mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, struct 
mlxsw_sp_fib6_entry *fib6_entry, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_rt6 *mlxsw_sp_rt6; @@ -5084,7 +5084,7 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, - const struct rt6_info *rt) + const struct fib6_info *rt) { /* Packets hitting RTF_REJECT routes need to be discarded by the * stack. We can rely on their destination device not having a @@ -5092,9 +5092,9 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, * local, which will cause them to be trapped with a lower * priority than packets that need to be locally received. */ - if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) + if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; - else if (rt->rt6i_flags & RTF_REJECT) + else if (rt->fib6_flags & RTF_REJECT) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; @@ -5118,7 +5118,7 @@ mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_entry *fib_entry; @@ -5168,25 +5168,25 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt, bool replace) + const struct fib6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); - if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id > nrt->fib6_table->tb6_id) continue; - if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id) break; - if (replace && rt->rt6i_metric == nrt->rt6i_metric) { + if (replace && rt->fib6_metric == nrt->fib6_metric) { if (mlxsw_sp_fib6_rt_can_mp(rt) == mlxsw_sp_fib6_rt_can_mp(nrt)) return fib6_entry; if (mlxsw_sp_fib6_rt_can_mp(nrt)) fallback = fallback ?: fib6_entry; } - if (rt->rt6i_metric > nrt->rt6i_metric) + if (rt->fib6_metric > nrt->fib6_metric) return fallback ?: fib6_entry; } @@ -5198,7 +5198,7 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, bool replace) { struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; - struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); + struct fib6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); struct mlxsw_sp_fib6_entry *fib6_entry; fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace); @@ -5213,9 +5213,9 @@ mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, struct mlxsw_sp_fib6_entry *last; list_for_each_entry(last, &fib_node->entry_list, common.list) { - struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last); + struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(last); - if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) + if (nrt->fib6_table->tb6_id > rt->fib6_table->tb6_id) break; fib6_entry = last; } @@ -5268,29 +5268,29 @@ mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_fib6_entry 
* mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, - const struct rt6_info *rt) + const struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; - vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id); + vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id); if (!vr) return NULL; fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); - fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr, - sizeof(rt->rt6i_dst.addr), - rt->rt6i_dst.plen); + fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr, + sizeof(rt->fib6_dst.addr), + rt->fib6_dst.plen); if (!fib_node) return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { - struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + struct fib6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); - if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && - rt->rt6i_metric == iter_rt->rt6i_metric && + if (rt->fib6_table->tb6_id == iter_rt->fib6_table->tb6_id && + rt->fib6_metric == iter_rt->fib6_metric && mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) return fib6_entry; } @@ -5316,7 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, - struct rt6_info *rt, bool replace) + struct fib6_info *rt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5325,16 +5325,16 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; - if (rt->rt6i_src.plen) + if (rt->fib6_src.plen) return -EINVAL; if (mlxsw_sp_fib6_rt_should_ignore(rt)) return 0; - fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id, - &rt->rt6i_dst.addr, - sizeof(rt->rt6i_dst.addr), - rt->rt6i_dst.plen, + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id, + &rt->fib6_dst.addr, + sizeof(rt->fib6_dst.addr), + rt->fib6_dst.plen, MLXSW_SP_L3_PROTO_IPV6); if (IS_ERR(fib_node)) return PTR_ERR(fib_node); @@ -5373,7 +5373,7 @@ err_fib6_entry_nexthop_add: } static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, - struct rt6_info *rt) + struct fib6_info *rt) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -5725,6 +5725,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; err = mlxsw_sp_router_fib6_add(mlxsw_sp, @@ -5831,12 +5832,13 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: fen6_info = container_of(info, struct fib6_entry_notifier_info, info); fib_work->fen6_info = *fen6_info; - rt6_hold(fib_work->fen6_info.rt); + fib6_info_hold(fib_work->fen6_info.rt); break; } } @@ -5882,24 +5884,24 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event, switch (info->family) { case AF_INET: if (!fib4_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; case AF_INET6: if (!fib6_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; case RTNL_FAMILY_IPMR: if (!ipmr_rule_default(rule) && !rule->l3mdev) - err = -1; + 
err = -EOPNOTSUPP; break; case RTNL_FAMILY_IP6MR: if (!ip6mr_rule_default(rule) && !rule->l3mdev) - err = -1; + err = -EOPNOTSUPP; break; } if (err < 0) - NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload"); + NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported"); return err; } @@ -5926,8 +5928,15 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, case FIB_EVENT_RULE_DEL: err = mlxsw_sp_router_fib_rule_event(event, info, router->mlxsw_sp); - if (!err) - return NOTIFY_DONE; + if (!err || info->extack) + return notifier_from_errno(err); + break; + case FIB_EVENT_ENTRY_ADD: + if (router->aborted) { + NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route"); + return notifier_from_errno(-EINVAL); + } + break; } fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 65a77708ff61..3d187d88cc7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -32,6 +32,7 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include <linux/if_bridge.h> #include <linux/list.h> #include <net/arp.h> #include <net/gre.h> @@ -39,8 +40,9 @@ #include <net/ip6_tunnel.h> #include "spectrum.h" -#include "spectrum_span.h" #include "spectrum_ipip.h" +#include "spectrum_span.h" +#include "spectrum_switchdev.h" int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) { @@ -135,14 +137,14 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = { static int mlxsw_sp_span_dmac(struct neigh_table *tbl, const void *pkey, - struct net_device *l3edev, + struct net_device *dev, unsigned char dmac[ETH_ALEN]) { - struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev); + struct neighbour *neigh = neigh_lookup(tbl, pkey, dev); int err = 0; if (!neigh) { - neigh = neigh_create(tbl, pkey, l3edev); + neigh = neigh_create(tbl, pkey, dev); if (IS_ERR(neigh)) return PTR_ERR(neigh); } @@ -167,8 +169,99 @@ mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp) return 0; } +static struct net_device * +mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev, + unsigned char *dmac, + u16 *p_vid) +{ + struct bridge_vlan_info vinfo; + struct net_device *edev; + u16 vid = *p_vid; + + if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid))) + return NULL; + if (!vid || + br_vlan_get_info(br_dev, vid, &vinfo) || + !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY)) + return NULL; + + edev = br_fdb_find_port(br_dev, dmac, vid); + if (!edev) + return NULL; + + if (br_vlan_get_info(edev, vid, &vinfo)) + return NULL; + if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED) + *p_vid = 0; + else + *p_vid = vid; + return edev; +} + +static struct net_device * +mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev, + unsigned char *dmac) +{ + return br_fdb_find_port(br_dev, dmac, 0); +} + +static struct net_device * +mlxsw_sp_span_entry_bridge(const struct net_device *br_dev, + unsigned char dmac[ETH_ALEN], + u16 *p_vid) +{ + struct mlxsw_sp_bridge_port *bridge_port; + enum mlxsw_reg_spms_state spms_state; + struct net_device *dev = NULL; + struct mlxsw_sp_port *port; + u8 stp_state; + + if (br_vlan_enabled(br_dev)) + dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid); + else if (!*p_vid) + dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac); + if (!dev) + return NULL; + + port = mlxsw_sp_port_dev_lower_find(dev); + if (!port) + return NULL; + + bridge_port = 
mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev); + if (!bridge_port) + return NULL; + + stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port); + spms_state = mlxsw_sp_stp_spms_state(stp_state); + if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING) + return NULL; + + return dev; +} + +static struct net_device * +mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev, + u16 *p_vid) +{ + *p_vid = vlan_dev_vlan_id(vlan_dev); + return vlan_dev_real_dev(vlan_dev); +} + +static struct net_device * +mlxsw_sp_span_entry_lag(struct net_device *lag_dev) +{ + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(lag_dev, dev, iter) + if ((dev->flags & IFF_UP) && mlxsw_sp_port_dev_check(dev)) + return dev; + + return NULL; +} + static __maybe_unused int -mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, +mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev, union mlxsw_sp_l3addr saddr, union mlxsw_sp_l3addr daddr, union mlxsw_sp_l3addr gw, @@ -177,21 +270,51 @@ mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev, struct mlxsw_sp_span_parms *sparmsp) { unsigned char dmac[ETH_ALEN]; + u16 vid = 0; if (mlxsw_sp_l3addr_is_zero(gw)) gw = daddr; - if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) || - mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac)) - return mlxsw_sp_span_entry_unoffloadable(sparmsp); + if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac)) + goto unoffloadable; - sparmsp->dest_port = netdev_priv(l3edev); + if (is_vlan_dev(edev)) + edev = mlxsw_sp_span_entry_vlan(edev, &vid); + + if (netif_is_bridge_master(edev)) { + edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid); + if (!edev) + goto unoffloadable; + } + + if (is_vlan_dev(edev)) { + if (vid || !(edev->flags & IFF_UP)) + goto unoffloadable; + edev = mlxsw_sp_span_entry_vlan(edev, &vid); + } + + if (netif_is_lag_master(edev)) { + if (!(edev->flags & IFF_UP)) + goto unoffloadable; + edev = mlxsw_sp_span_entry_lag(edev); + if (!edev) + goto unoffloadable; + } + + if (!mlxsw_sp_port_dev_check(edev)) + goto unoffloadable; + + sparmsp->dest_port = netdev_priv(edev); sparmsp->ttl = ttl; memcpy(sparmsp->dmac, dmac, ETH_ALEN); - memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN); + memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN); sparmsp->saddr = saddr; sparmsp->daddr = daddr; + sparmsp->vid = vid; return 0; + +unoffloadable: + return mlxsw_sp_span_entry_unoffloadable(sparmsp); } #if IS_ENABLED(CONFIG_NET_IPGRE) @@ -268,9 +391,10 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, - sparms.dmac, false); + sparms.dmac, !!sparms.vid); mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl, sparms.ttl, sparms.smac, be32_to_cpu(sparms.saddr.addr4), @@ -368,9 +492,10 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. 
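mlxsw_sp_span_entry_tunnel_parms_common() now peels the mirror target one layer at a time: an 802.1Q upper first yields its VLAN and real device, a bridge master is resolved through the FDB (and rejected unless the egress port is STP-forwarding), a second VLAN upper may sit under the bridge, a LAG master is replaced by any member port that is up, and only then must the result be a physical mlxsw port. A condensed standalone sketch of that peeling, where the device kinds and the single lower pointer are hypothetical stand-ins for the real netdev queries (in particular, the bridge hop compresses the FDB and STP checks into one step):

    #include <stdbool.h>
    #include <stdio.h>

    enum dev_kind { DEV_PHYS, DEV_VLAN, DEV_BRIDGE, DEV_LAG };

    struct dev {
        enum dev_kind kind;
        struct dev *lower;      /* vlan real_dev / fdb result / lag member */
        unsigned short vid;     /* for DEV_VLAN uppers */
        bool up;
    };

    /* Peel vlan -> bridge -> vlan -> lag; NULL means unoffloadable. */
    static struct dev *resolve_egress(struct dev *edev, unsigned short *vid)
    {
        *vid = 0;
        if (edev && edev->kind == DEV_VLAN) {
            *vid = edev->vid;
            edev = edev->lower;
        }
        if (edev && edev->kind == DEV_BRIDGE)
            edev = edev->lower;   /* stands in for the FDB/STP lookup */
        if (edev && edev->kind == DEV_VLAN) {
            if (*vid || !edev->up)
                return NULL;      /* stacked tags are not offloadable */
            *vid = edev->vid;
            edev = edev->lower;
        }
        if (edev && edev->kind == DEV_LAG) {
            if (!edev->up)
                return NULL;
            edev = edev->lower;   /* any member port that is up */
        }
        return (edev && edev->kind == DEV_PHYS) ? edev : NULL;
    }

    int main(void)
    {
        struct dev port = { DEV_PHYS, NULL, 0, true };
        struct dev lag  = { DEV_LAG, &port, 0, true };
        struct dev vlan = { DEV_VLAN, &lag, 100, true };
        unsigned short vid;

        printf("offloadable: %s, vid %u\n",
               resolve_egress(&vlan, &vid) ? "yes" : "no", vid);
        return 0;
    }
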
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, - sparms.dmac, false); + sparms.dmac, !!sparms.vid); mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac, sparms.saddr.addr6, sparms.daddr.addr6); @@ -394,6 +519,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = { }; #endif +static bool +mlxsw_sp_span_vlan_can_handle(const struct net_device *dev) +{ + return is_vlan_dev(dev) && + mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev)); +} + +static int +mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + struct net_device *real_dev; + u16 vid; + + if (!(to_dev->flags & IFF_UP)) + return mlxsw_sp_span_entry_unoffloadable(sparmsp); + + real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid); + sparmsp->dest_port = netdev_priv(real_dev); + sparmsp->vid = vid; + return 0; +} + +static int +mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + struct mlxsw_sp_port *dest_port = sparms.dest_port; + struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp; + u8 local_port = dest_port->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); + mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); +} + +static void +mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + mlxsw_sp_span_entry_deconfigure_common(span_entry, + MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = { + .can_handle = mlxsw_sp_span_vlan_can_handle, + .parms = mlxsw_sp_span_entry_vlan_parms, + .configure = mlxsw_sp_span_entry_vlan_configure, + .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure, +}; + static const struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { &mlxsw_sp_span_entry_ops_phys, @@ -403,6 +583,7 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { #if IS_ENABLED(CONFIG_IPV6_GRE) &mlxsw_sp_span_entry_ops_gretap6, #endif + &mlxsw_sp_span_entry_ops_vlan, }; static int @@ -766,7 +947,7 @@ int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) - return -ENOENT; + return -ENOBUFS; netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", span_entry->id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 4b87ec20e658..14a6de904db1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -63,6 +63,7 @@ struct mlxsw_sp_span_parms { unsigned char smac[ETH_ALEN]; union mlxsw_sp_l3addr daddr; union mlxsw_sp_l3addr saddr; + u16 vid; }; struct mlxsw_sp_span_entry_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 4ed01182a82c..e97652c40d13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -49,7 +49,9 @@ #include <linux/netlink.h> #include <net/switchdev.h> +#include "spectrum_span.h" 
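The new RSPAN support slots into mlxsw_sp_span_entry_types[] beside the phys and gretap entries, all sharing the same can_handle/parms/configure/deconfigure ops, with the SPAN core picking the first ops whose can_handle() accepts the mirror target. A minimal sketch of that first-match dispatch (the names and the two toy predicates are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct target { bool is_vlan; };

    struct span_ops {
        const char *name;
        bool (*can_handle)(const struct target *t);
    };

    static bool phys_can_handle(const struct target *t) { return !t->is_vlan; }
    static bool vlan_can_handle(const struct target *t) { return t->is_vlan; }

    static const struct span_ops phys_ops = { "phys", phys_can_handle };
    static const struct span_ops vlan_ops = { "vlan", vlan_can_handle };

    /* NULL-terminated ops table, like mlxsw_sp_span_entry_types[]. */
    static const struct span_ops *const span_ops_tbl[] = {
        &phys_ops,
        &vlan_ops,
        NULL,
    };

    static const struct span_ops *span_lookup(const struct target *t)
    {
        int i;

        for (i = 0; span_ops_tbl[i]; i++)
            if (span_ops_tbl[i]->can_handle(t))
                return span_ops_tbl[i];
        return NULL;
    }

    int main(void)
    {
        struct target t = { .is_vlan = true };
        const struct span_ops *ops = span_lookup(&t);

        printf("dispatched to: %s\n", ops ? ops->name : "(none)");
        return 0;
    }
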
#include "spectrum_router.h" +#include "spectrum_switchdev.h" #include "spectrum.h" #include "core.h" #include "reg.h" @@ -239,7 +241,7 @@ __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device, return NULL; } -static struct mlxsw_sp_bridge_port * +struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, struct net_device *brport_dev) { @@ -922,6 +924,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, break; } + if (switchdev_trans_ph_commit(trans)) + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + return err; } @@ -1139,6 +1144,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -1646,18 +1654,57 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, } } +struct mlxsw_sp_span_respin_work { + struct work_struct work; + struct mlxsw_sp *mlxsw_sp; +}; + +static void mlxsw_sp_span_respin_work(struct work_struct *work) +{ + struct mlxsw_sp_span_respin_work *respin_work = + container_of(work, struct mlxsw_sp_span_respin_work, work); + + rtnl_lock(); + mlxsw_sp_span_respin(respin_work->mlxsw_sp); + rtnl_unlock(); + kfree(respin_work); +} + +static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_span_respin_work *respin_work; + + respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC); + if (!respin_work) + return; + + INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work); + respin_work->mlxsw_sp = mlxsw_sp; + + mlxsw_core_schedule_work(&respin_work->work); +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + const struct switchdev_obj_port_vlan *vlan; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), - trans); + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); + + if (switchdev_trans_ph_prepare(trans)) { + /* The event is emitted before the changes are actually + * applied to the bridge. Therefore schedule the respin + * call for later, so that the respin logic sees the + * updated bridge state. 
+ */ + mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); + } break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = mlxsw_sp_port_mdb_add(mlxsw_sp_port, @@ -1697,6 +1744,9 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; u16 vid; + if (netif_is_bridge_master(orig_dev)) + return -EOPNOTSUPP; + bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_port)) return -EINVAL; @@ -1806,6 +1856,8 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, break; } + mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp); + return err; } @@ -2222,6 +2274,8 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true); if (err) break; @@ -2231,10 +2285,20 @@ static void mlxsw_sp_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false); break; + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_DEL_TO_BRIDGE: + /* These events are only used to potentially update an existing + * SPAN mirror. + */ + break; } + mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp); + out: rtnl_unlock(); kfree(switchdev_work->fdb_info.addr); @@ -2263,7 +2327,9 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, switch (event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ - case SWITCHDEV_FDB_DEL_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */ + case SWITCHDEV_FDB_DEL_TO_BRIDGE: memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); @@ -2295,6 +2361,12 @@ static struct notifier_block mlxsw_sp_switchdev_notifier = { .notifier_call = mlxsw_sp_switchdev_event, }; +u8 +mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) +{ + return bridge_port->stp_state; +} + static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h new file mode 100644 index 000000000000..bc44d5effc28 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 + * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.h + * Copyright (c) 2018 Mellanox Technologies. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/netdevice.h> + +struct mlxsw_sp_bridge; +struct mlxsw_sp_bridge_port; + +struct mlxsw_sp_bridge_port * +mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, + struct net_device *brport_dev); + +u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index a655c5850aa6..3922c1cfe5f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -417,13 +417,10 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) { struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); - int err; - - err = snprintf(name, len, "p%d", mlxsw_sx_port->mapping.module + 1); - if (err >= len) - return -EINVAL; - return 0; + return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core, + mlxsw_sx_port->local_port, + name, len); } static const struct net_device_ops mlxsw_sx_port_netdev_ops = { @@ -1149,7 +1146,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, } mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port, - mlxsw_sx_port, dev, false, 0); + mlxsw_sx_port, dev, module + 1, false, 0); mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig new file mode 100644 index 000000000000..36c84625d54e --- /dev/null +++ b/drivers/net/ethernet/mscc/Kconfig @@ -0,0 +1,30 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +config NET_VENDOR_MICROSEMI + bool "Microsemi devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Microsemi devices. + +if NET_VENDOR_MICROSEMI + +config MSCC_OCELOT_SWITCH + tristate "Ocelot switch driver" + depends on NET_SWITCHDEV + depends on HAS_IOMEM + select PHYLIB + select REGMAP_MMIO + help + This driver supports the Ocelot network switch device. + +config MSCC_OCELOT_SWITCH_OCELOT + tristate "Ocelot switch driver on Ocelot" + depends on MSCC_OCELOT_SWITCH + help + This driver supports the Ocelot network switch device as present on + the Ocelot SoCs. 
+ +endif # NET_VENDOR_MICROSEMI diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile new file mode 100644 index 000000000000..cb52a3b128ae --- /dev/null +++ b/drivers/net/ethernet/mscc/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: (GPL-2.0 OR MIT) +obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o +mscc_ocelot_common-y := ocelot.o ocelot_io.o +mscc_ocelot_common-y += ocelot_regs.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c new file mode 100644 index 000000000000..c8c74aa548d9 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -0,0 +1,1333 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_bridge.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/skbuff.h> +#include <net/arp.h> +#include <net/netevent.h> +#include <net/rtnetlink.h> +#include <net/switchdev.h> + +#include "ocelot.h" + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. + */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACv4, + ENTRYTYPE_MACv6, +}; + +struct ocelot_mact_entry { + u8 mac[ETH_ALEN]; + u16 vid; + enum macaccess_entry_type type; +}; + +static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) +{ + unsigned int val, timeout = 10; + + /* Wait for the issued mac table command to be completed, or timeout. + * When the command read from ANA_TABLES_MACACCESS is + * MACACCESS_CMD_IDLE, the issued command completed successfully. + */ + do { + val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); + val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M; + } while (val != MACACCESS_CMD_IDLE && timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static void ocelot_mact_select(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + u32 macl = 0, mach = 0; + + /* Set the MAC address to handle and the vlan associated in a format + * understood by the hardware. 
+ */ + mach |= vid << 16; + mach |= mac[0] << 8; + mach |= mac[1] << 0; + macl |= mac[2] << 24; + macl |= mac[3] << 16; + macl |= mac[4] << 8; + macl |= mac[5] << 0; + + ocelot_write(ocelot, macl, ANA_TABLES_MACLDATA); + ocelot_write(ocelot, mach, ANA_TABLES_MACHDATA); + +} + +static int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a write command */ + ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_DEST_IDX(port) | + ANA_TABLES_MACACCESS_ENTRYTYPE(type) | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN), + ANA_TABLES_MACACCESS); + + return ocelot_mact_wait_for_completion(ocelot); +} + +static int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], + unsigned int vid) +{ + ocelot_mact_select(ocelot, mac, vid); + + /* Issue a forget command */ + ocelot_write(ocelot, + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET), + ANA_TABLES_MACACCESS); + + return ocelot_mact_wait_for_completion(ocelot); +} + +static void ocelot_mact_init(struct ocelot *ocelot) +{ + /* Configure the learning mode entries attributes: + * - Do not copy the frame to the CPU extraction queues. + * - Use the vlan and mac_cpoy for dmac lookup. + */ + ocelot_rmw(ocelot, 0, + ANA_AGENCTRL_LEARN_CPU_COPY | ANA_AGENCTRL_IGNORE_DMAC_FLAGS + | ANA_AGENCTRL_LEARN_FWD_KILL + | ANA_AGENCTRL_LEARN_IGNORE_VLAN, + ANA_AGENCTRL); + + /* Clear the MAC table */ + ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); +} + +static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) +{ + unsigned int val, timeout = 10; + + /* Wait for the issued mac table command to be completed, or timeout. + * When the command read from ANA_TABLES_MACACCESS is + * MACACCESS_CMD_IDLE, the issued command completed successfully. 
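ocelot_mact_select() splits the 48-bit MAC address and the 12-bit VID across two 32-bit registers: MACHDATA carries vid << 16 plus the two high MAC bytes, MACLDATA the remaining four bytes, and ocelot_mact_read() later reverses exactly that layout. A standalone round-trip check of the packing, pure bit arithmetic with no register I/O:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack as in ocelot_mact_select(). */
    static void mact_pack(const uint8_t mac[6], uint16_t vid,
                          uint32_t *mach, uint32_t *macl)
    {
        *mach = ((uint32_t)vid << 16) | ((uint32_t)mac[0] << 8) | mac[1];
        *macl = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
                ((uint32_t)mac[4] << 8) | mac[5];
    }

    /* Unpack as in ocelot_mact_read(). */
    static void mact_unpack(uint32_t mach, uint32_t macl,
                            uint8_t mac[6], uint16_t *vid)
    {
        mac[0] = (mach >> 8) & 0xff;
        mac[1] = mach & 0xff;
        mac[2] = (macl >> 24) & 0xff;
        mac[3] = (macl >> 16) & 0xff;
        mac[4] = (macl >> 8) & 0xff;
        mac[5] = macl & 0xff;
        *vid = (mach >> 16) & 0xfff;
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t out[6];
        uint32_t mach, macl;
        uint16_t vid;

        mact_pack(mac, 42, &mach, &macl);
        mact_unpack(mach, macl, out, &vid);
        assert(vid == 42 && !memcmp(mac, out, 6));
        printf("mach=%#010x macl=%#010x\n", mach, macl);
        return 0;
    }
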
+ */ + do { + val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); + val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M; + } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + +static void ocelot_vlan_init(struct ocelot *ocelot) +{ + /* Clear VLAN table, by default all ports are members of all VLANs */ + ocelot_write(ocelot, ANA_TABLES_VLANACCESS_CMD_INIT, + ANA_TABLES_VLANACCESS); + ocelot_vlant_wait_for_completion(ocelot); +} + +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 ocelot_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + +static void ocelot_port_adjust_link(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + u8 p = port->chip_port; + int speed, atop_wm, mode = 0; + + switch (dev->phydev->speed) { + case SPEED_10: + speed = OCELOT_SPEED_10; + break; + case SPEED_100: + speed = OCELOT_SPEED_100; + break; + case SPEED_1000: + speed = OCELOT_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + break; + case SPEED_2500: + speed = OCELOT_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + break; + default: + netdev_err(dev, "Unsupported PHY speed: %d\n", + dev->phydev->speed); + return; + } + + phy_print_status(dev->phydev); + + if (!dev->phydev->link) + return; + + /* Only full duplex supported for now */ + ocelot_port_writel(port, DEV_MAC_MODE_CFG_FDX_ENA | + mode, DEV_MAC_MODE_CFG); + + /* Set MAC IFG Gaps + * FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 0 + * !FDX: TX_IFG = 5, RX_IFG1 = RX_IFG2 = 5 + */ + ocelot_port_writel(port, DEV_MAC_IFG_CFG_TX_IFG(5), DEV_MAC_IFG_CFG); + + /* Load seed (0) and set MAC HDX late collision */ + ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67) | + DEV_MAC_HDX_CFG_SEED_LOAD, + DEV_MAC_HDX_CFG); + mdelay(1); + ocelot_port_writel(port, DEV_MAC_HDX_CFG_LATE_COL_POS(67), + DEV_MAC_HDX_CFG); + + /* Disable HDX fast control */ + ocelot_port_writel(port, DEV_PORT_MISC_HDX_FAST_DIS, DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(port, PCS1G_MODE_CFG_SGMII_MODE_ENA, PCS1G_MODE_CFG); + ocelot_port_writel(port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(port, 0, PCS1G_LB_CFG); + + /* Set Max Length and maximum tags allowed */ + ocelot_port_writel(port, VLAN_ETH_FRAME_LEN, DEV_MAC_MAXLEN_CFG); + ocelot_port_writel(port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) | + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, + DEV_MAC_TAGS_CFG); + + /* Enable MAC module */ + ocelot_port_writel(port, DEV_MAC_ENA_CFG_RX_ENA | + DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); + + /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of + * reset */ + ocelot_port_writel(port, DEV_CLOCK_CFG_LINK_SPEED(speed), + DEV_CLOCK_CFG); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_HIGH_CFG); + ocelot_port_writel(port, 0, DEV_MAC_FC_MAC_LOW_CFG); + + /* No PFC */ + ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed), + ANA_PFC_PFC_CFG, p); + + /* Set Pause WM hysteresis + * 152 = 6 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ + * 101 = 4 * VLAN_ETH_FRAME_LEN / OCELOT_BUFFER_CELL_SZ + */ + ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | + 
SYS_PAUSE_CFG_PAUSE_STOP(101) | + SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, p); + + /* Core: Enable port for frame transfer */ + ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, p); + + /* Flow control */ + ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_FC_LINK_SPEED(speed), + SYS_MAC_FC_CFG, p); + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, p); + + /* Tail dropping watermark */ + atop_wm = (ocelot->shared_queue_sz - 9 * VLAN_ETH_FRAME_LEN) / OCELOT_BUFFER_CELL_SZ; + ocelot_write_rix(ocelot, ocelot_wm_enc(9 * VLAN_ETH_FRAME_LEN), + SYS_ATOP, p); + ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); +} + +static int ocelot_port_open(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + int err; + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. + */ + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | + ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(port->chip_port), + ANA_PORT_PORT_CFG, port->chip_port); + + err = phy_connect_direct(dev, port->phy, &ocelot_port_adjust_link, + PHY_INTERFACE_MODE_NA); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + dev->phydev = port->phy; + + phy_attached_info(port->phy); + phy_start(port->phy); + return 0; +} + +static int ocelot_port_stop(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + + phy_disconnect(port->phy); + + dev->phydev = NULL; + + ocelot_port_writel(port, 0, DEV_MAC_ENA_CFG); + ocelot_rmw_rix(port->ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE, port->chip_port); + return 0; +} + +/* Generate the IFH for frame injection + * + * The IFH is a 128bit-value + * bit 127: bypass the analyzer processing + * bit 56-67: destination mask + * bit 28-29: pop_cnt: 3 disables all rewriting of the frame + * bit 20-27: cpu extraction queue mask + * bit 16: tag type 0: C-tag, 1: S-tag + * bit 0-11: VID + */ +static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) +{ + ifh[0] = IFH_INJ_BYPASS; + ifh[1] = (0xff00 & info->port) >> 8; + ifh[2] = (0xff & info->port) << 24; + ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) | + (info->tag_type << 16) | info->vid; + + return 0; +} + +static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + u32 val, ifh[IFH_LEN]; + struct frame_info info = {}; + u8 grp = 0; /* Send everything on CPU group 0 */ + unsigned int i, count, last; + + val = ocelot_read(ocelot, QS_INJ_STATUS); + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || + (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))) + return NETDEV_TX_BUSY; + + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + + info.port = BIT(port->chip_port); + info.cpuq = 0xff; + ocelot_gen_ifh(ifh, &info); + + for (i = 0; i < IFH_LEN; i++) + ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp); + + count = (skb->len + 3) / 4; + last = skb->len % 4; + for (i = 0; i < count; i++) { + ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); + } + + /* Add padding */ + while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { + ocelot_write_rix(ocelot, 0, 
QS_INJ_WR, grp); + i++; + } + + /* Indicate EOF and valid bytes in last word */ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | + QS_INJ_CTRL_EOF, + QS_INJ_CTRL, grp); + + /* Add dummy CRC */ + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + skb_tx_timestamp(skb); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static void ocelot_mact_mc_reset(struct ocelot_port *port) +{ + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha, *n; + + /* Free and forget all the MAC addresses stored in the port private mc + * list. These are mc addresses that were previously added by calling + * ocelot_mact_mc_add(). + */ + list_for_each_entry_safe(ha, n, &port->mc, list) { + ocelot_mact_forget(ocelot, ha->addr, port->pvid); + list_del(&ha->list); + kfree(ha); + } +} + +static int ocelot_mact_mc_add(struct ocelot_port *port, + struct netdev_hw_addr *hw_addr) +{ + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); + + if (!ha) + return -ENOMEM; + + memcpy(ha, hw_addr, sizeof(*ha)); + list_add_tail(&ha->list, &port->mc); + + ocelot_mact_learn(ocelot, PGID_CPU, ha->addr, port->pvid, + ENTRYTYPE_LOCKED); + + return 0; +} + +static void ocelot_set_rx_mode(struct net_device *dev) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + struct netdev_hw_addr *ha; + int i; + u32 val; + + /* This doesn't handle promiscuous mode because the bridge core is + * setting IFF_PROMISC on all slave interfaces and all frames would be + * forwarded to the CPU port. + */ + val = GENMASK(ocelot->num_phys_ports - 1, 0); + for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + + /* Handle the device multicast addresses. First remove all the + * previously installed addresses and then add the latest ones to the + * mac table. + */ + ocelot_mact_mc_reset(port); + netdev_for_each_mc_addr(ha, dev) + ocelot_mact_mc_add(port, ha); +} + +static int ocelot_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct ocelot_port *port = netdev_priv(dev); + int ret; + + ret = snprintf(buf, len, "p%d", port->chip_port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int ocelot_port_set_mac_address(struct net_device *dev, void *p) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + const struct sockaddr *addr = p; + + /* Learn the new net device MAC address in the mac table. */ + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, port->pvid, + ENTRYTYPE_LOCKED); + /* Then forget the previous one. 
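ocelot_gen_ifh() above lays the 128-bit injection header out across four 32-bit words, most-significant word first: the bypass flag in bit 127, the 12-bit destination mask in bits 56-67 (split between words 1 and 2), and pop count, CPU queue mask, tag type and VID in word 3. The sketch below redoes that packing standalone; TOY_IFH_BYPASS and TOY_IFH_POP_CNT_DISABLE are assumed encodings read off the layout comment (bit 31 of word 0, value 3 in bits 28-29 of word 3), not copies of the driver's defines:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TOY_IFH_BYPASS          (UINT32_C(1) << 31)  /* bit 127 overall */
    #define TOY_IFH_POP_CNT_DISABLE (UINT32_C(3) << 28)  /* pop_cnt = 3 */

    /* Mirror of ocelot_gen_ifh(); port_mask is a destination bit mask. */
    static void toy_gen_ifh(uint32_t ifh[4], uint16_t port_mask,
                            uint8_t cpuq, uint8_t tag_type, uint16_t vid)
    {
        ifh[0] = TOY_IFH_BYPASS;
        ifh[1] = (port_mask & 0xff00) >> 8;    /* dest bits 64-67 */
        ifh[2] = (uint32_t)(port_mask & 0x00ff) << 24;  /* dest bits 56-63 */
        ifh[3] = TOY_IFH_POP_CNT_DISABLE | ((uint32_t)cpuq << 20) |
                 ((uint32_t)tag_type << 16) | (vid & 0xfff);
    }

    int main(void)
    {
        uint32_t ifh[4];
        int i;

        toy_gen_ifh(ifh, 1u << 3, 0xff, 0, 0);  /* inject on chip port 3 */
        for (i = 0; i < 4; i++)
            printf("ifh[%d] = 0x%08" PRIx32 "\n", i, ifh[i]);
        return 0;
    }
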
*/ + ocelot_mact_forget(ocelot, dev->dev_addr, port->pvid); + + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; +} + +static void ocelot_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port->chip_port), + SYS_STAT_CFG); + + /* Get Rx stats */ + stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS); + stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) + + ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) + + ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) + + ocelot_read(ocelot, SYS_COUNT_RX_LONGS) + + ocelot_read(ocelot, SYS_COUNT_RX_64) + + ocelot_read(ocelot, SYS_COUNT_RX_65_127) + + ocelot_read(ocelot, SYS_COUNT_RX_128_255) + + ocelot_read(ocelot, SYS_COUNT_RX_256_1023) + + ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX); + stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST); + stats->rx_dropped = dev->stats.rx_dropped; + + /* Get Tx stats */ + stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS); + stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) + + ocelot_read(ocelot, SYS_COUNT_TX_65_127) + + ocelot_read(ocelot, SYS_COUNT_TX_128_511) + + ocelot_read(ocelot, SYS_COUNT_TX_512_1023) + + ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX); + stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) + + ocelot_read(ocelot, SYS_COUNT_TX_AGING); + stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION); +} + +static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, const unsigned char *addr, + u16 vid, u16 flags) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + return ocelot_mact_learn(ocelot, port->chip_port, addr, vid, + ENTRYTYPE_NORMAL); +} + +static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct ocelot_port *port = netdev_priv(dev); + struct ocelot *ocelot = port->ocelot; + + return ocelot_mact_forget(ocelot, addr, vid); +} + +struct ocelot_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int ocelot_fdb_do_dump(struct ocelot_mact_entry *entry, + struct ocelot_dump_ctx *dump) +{ + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = NUD_REACHABLE; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, entry->mac)) + goto nla_put_failure; + + if (entry->vid && nla_put_u16(dump->skb, NDA_VLAN, entry->vid)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static inline int ocelot_mact_read(struct ocelot_port *port, int row, int col, + struct ocelot_mact_entry *entry) +{ + struct ocelot *ocelot = port->ocelot; + char mac[ETH_ALEN]; + u32 val, dst, macl, mach; + + 
/* Set row and column to read from */
+	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_M_INDEX, row);
+	ocelot_field_write(ocelot, ANA_TABLES_MACTINDX_BUCKET, col);
+
+	/* Issue a read command */
+	ocelot_write(ocelot,
+		     ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_READ),
+		     ANA_TABLES_MACACCESS);
+
+	if (ocelot_mact_wait_for_completion(ocelot))
+		return -ETIMEDOUT;
+
+	/* Read the entry flags */
+	val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+	if (!(val & ANA_TABLES_MACACCESS_VALID))
+		return -EINVAL;
+
+	/* If the entry read has another port configured as its destination,
+	 * do not report it.
+	 */
+	dst = (val & ANA_TABLES_MACACCESS_DEST_IDX_M) >> 3;
+	if (dst != port->chip_port)
+		return -EINVAL;
+
+	/* Get the entry's MAC address and VLAN id */
+	macl = ocelot_read(ocelot, ANA_TABLES_MACLDATA);
+	mach = ocelot_read(ocelot, ANA_TABLES_MACHDATA);
+
+	mac[0] = (mach >> 8) & 0xff;
+	mac[1] = (mach >> 0) & 0xff;
+	mac[2] = (macl >> 24) & 0xff;
+	mac[3] = (macl >> 16) & 0xff;
+	mac[4] = (macl >> 8) & 0xff;
+	mac[5] = (macl >> 0) & 0xff;
+
+	entry->vid = (mach >> 16) & 0xfff;
+	ether_addr_copy(entry->mac, mac);
+
+	return 0;
+}
+
+static int ocelot_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct net_device *dev,
+			   struct net_device *filter_dev, int *idx)
+{
+	struct ocelot_port *port = netdev_priv(dev);
+	int i, j, ret = 0;
+	struct ocelot_dump_ctx dump = {
+		.dev = dev,
+		.skb = skb,
+		.cb = cb,
+		.idx = *idx,
+	};
+	struct ocelot_mact_entry entry;
+
+	/* Loop through all the MAC table entries. There are 1024 rows of 4
+	 * entries.
+	 */
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 4; j++) {
+			ret = ocelot_mact_read(port, i, j, &entry);
+			/* If the entry is invalid (wrong port, not valid,
+			 * etc.), skip it.
+			 */
+			if (ret == -EINVAL)
+				continue;
+			else if (ret)
+				goto end;
+
+			ret = ocelot_fdb_do_dump(&entry, &dump);
+			if (ret)
+				goto end;
+		}
+	}
+
+end:
+	*idx = dump.idx;
+	return ret;
+}
+
+static const struct net_device_ops ocelot_port_netdev_ops = {
+	.ndo_open = ocelot_port_open,
+	.ndo_stop = ocelot_port_stop,
+	.ndo_start_xmit = ocelot_port_xmit,
+	.ndo_set_rx_mode = ocelot_set_rx_mode,
+	.ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
+	.ndo_set_mac_address = ocelot_port_set_mac_address,
+	.ndo_get_stats64 = ocelot_get_stats64,
+	.ndo_fdb_add = ocelot_fdb_add,
+	.ndo_fdb_del = ocelot_fdb_del,
+	.ndo_fdb_dump = ocelot_fdb_dump,
+};
+
+static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+	struct ocelot_port *port = netdev_priv(netdev);
+	struct ocelot *ocelot = port->ocelot;
+	int i;
+
+	if (sset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ocelot->num_stats; i++)
+		memcpy(data + i * ETH_GSTRING_LEN, ocelot->stats_layout[i].name,
+		       ETH_GSTRING_LEN);
+}
+
+static void ocelot_check_stats(struct work_struct *work)
+{
+	struct delayed_work *del_work = to_delayed_work(work);
+	struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
+	int i, j;
+
+	mutex_lock(&ocelot->stats_lock);
+
+	for (i = 0; i < ocelot->num_phys_ports; i++) {
+		/* Configure the port to read the stats from */
+		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+
+		for (j = 0; j < ocelot->num_stats; j++) {
+			u32 val;
+			unsigned int idx = i * ocelot->num_stats + j;
+
+			val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+					      ocelot->stats_layout[j].offset);
+
+			if (val < (ocelot->stats[idx] & U32_MAX))
+				ocelot->stats[idx] += (u64)1 << 32;
+
+			ocelot->stats[idx] = (ocelot->stats[idx] &
+					      ~(u64)U32_MAX) + val;
+		}
+	}
+
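+	/* Worked example (illustrative): if a 32-bit hardware counter last
+	 * contributed 0xfffffff0 and now reads 0x00000010, then
+	 * val < (stats[idx] & U32_MAX) detects the wrap, a carry of 2^32 is
+	 * added, and the low 32 bits are replaced with val. The 64-bit
+	 * software counter thus stays monotonic as long as this work runs
+	 * at least once per wrap period.
+	 */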
cancel_delayed_work(&ocelot->stats_work);
+	queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
+			   OCELOT_STATS_CHECK_DELAY);
+
+	mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct ocelot_port *port = netdev_priv(dev);
+	struct ocelot *ocelot = port->ocelot;
+	int i;
+
+	/* Check and update now */
+	ocelot_check_stats(&ocelot->stats_work.work);
+
+	/* Copy all counters */
+	for (i = 0; i < ocelot->num_stats; i++)
+		*data++ = ocelot->stats[port->chip_port * ocelot->num_stats + i];
+}
+
+static int ocelot_get_sset_count(struct net_device *dev, int sset)
+{
+	struct ocelot_port *port = netdev_priv(dev);
+	struct ocelot *ocelot = port->ocelot;
+
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+	return ocelot->num_stats;
+}
+
+static const struct ethtool_ops ocelot_ethtool_ops = {
+	.get_strings = ocelot_get_strings,
+	.get_ethtool_stats = ocelot_get_ethtool_stats,
+	.get_sset_count = ocelot_get_sset_count,
+};
+
+static int ocelot_port_attr_get(struct net_device *dev,
+				struct switchdev_attr *attr)
+{
+	struct ocelot_port *ocelot_port = netdev_priv(dev);
+	struct ocelot *ocelot = ocelot_port->ocelot;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(ocelot->base_mac);
+		memcpy(&attr->u.ppid.id, &ocelot->base_mac,
+		       attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port,
+					  struct switchdev_trans *trans,
+					  u8 state)
+{
+	struct ocelot *ocelot = ocelot_port->ocelot;
+	u32 port_cfg;
+	int port, i;
+
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	if (!(BIT(ocelot_port->chip_port) & ocelot->bridge_mask))
+		return 0;
+
+	port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG,
+				   ocelot_port->chip_port);
+
+	switch (state) {
+	case BR_STATE_FORWARDING:
+		ocelot->bridge_fwd_mask |= BIT(ocelot_port->chip_port);
+		/* Fallthrough */
+	case BR_STATE_LEARNING:
+		port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+		break;
+
+	default:
+		port_cfg &= ~ANA_PORT_PORT_CFG_LEARN_ENA;
+		ocelot->bridge_fwd_mask &= ~BIT(ocelot_port->chip_port);
+		break;
+	}
+
+	ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG,
+			 ocelot_port->chip_port);
+
+	/* Apply FWD mask. The loop is needed to add/remove the current port
+	 * as a source for the other ports.
+	 */
+	for (port = 0; port < ocelot->num_phys_ports; port++) {
+		if (ocelot->bridge_fwd_mask & BIT(port)) {
+			unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(port);
+
+			for (i = 0; i < ocelot->num_phys_ports; i++) {
+				unsigned long bond_mask = ocelot->lags[i];
+
+				if (!bond_mask)
+					continue;
+
+				if (bond_mask & BIT(port)) {
+					mask &= ~bond_mask;
+					break;
+				}
+			}
+
+			ocelot_write_rix(ocelot,
+					 BIT(ocelot->num_phys_ports) | mask,
+					 ANA_PGID_PGID, PGID_SRC + port);
+		} else {
+			/* Only the CPU port; this is compatible with link
+			 * aggregation.
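+			 * Illustrative note: PGID_SRC + n holds the set of
+			 * ports that frames received on port n may be
+			 * forwarded to. E.g. with ports 0 and 1 bridged and
+			 * forwarding, port 0's entry above gets
+			 * BIT(num_phys_ports) | BIT(1), i.e. the CPU port
+			 * plus port 1, while a standalone port only gets the
+			 * CPU port bit set here.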
+			 */
+			ocelot_write_rix(ocelot,
+					 BIT(ocelot->num_phys_ports),
+					 ANA_PGID_PGID, PGID_SRC + port);
+		}
+	}
+
+	return 0;
+}
+
+static void ocelot_port_attr_ageing_set(struct ocelot_port *ocelot_port,
+					unsigned long ageing_clock_t)
+{
+	struct ocelot *ocelot = ocelot_port->ocelot;
+	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
+
+	ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(ageing_time / 2),
+		     ANA_AUTOAGE);
+}
+
+static void ocelot_port_attr_mc_set(struct ocelot_port *port, bool mc)
+{
+	struct ocelot *ocelot = port->ocelot;
+	u32 val = ocelot_read_gix(ocelot, ANA_PORT_CPU_FWD_CFG,
+				  port->chip_port);
+
+	if (mc)
+		val |= ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+		       ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+		       ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+	else
+		val &= ~(ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+			 ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+			 ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA);
+
+	ocelot_write_gix(ocelot, val, ANA_PORT_CPU_FWD_CFG, port->chip_port);
+}
+
+static int ocelot_port_attr_set(struct net_device *dev,
+				const struct switchdev_attr *attr,
+				struct switchdev_trans *trans)
+{
+	struct ocelot_port *ocelot_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+		ocelot_port_attr_stp_state_set(ocelot_port, trans,
+					       attr->u.stp_state);
+		break;
+	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+		ocelot_port_attr_ageing_set(ocelot_port, attr->u.ageing_time);
+		break;
+	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+		ocelot_port_attr_mc_set(ocelot_port, !attr->u.mc_disabled);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
+						     const unsigned char *addr,
+						     u16 vid)
+{
+	struct ocelot_multicast *mc;
+
+	list_for_each_entry(mc, &ocelot->multicast, list) {
+		if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
+			return mc;
+	}
+
+	return NULL;
+}
+
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+				   const struct switchdev_obj_port_mdb *mdb,
+				   struct switchdev_trans *trans)
+{
+	struct ocelot_port *port = netdev_priv(dev);
+	struct ocelot *ocelot = port->ocelot;
+	struct ocelot_multicast *mc;
+	unsigned char addr[ETH_ALEN];
+	u16 vid = mdb->vid;
+	bool new = false;
+
+	if (!vid)
+		vid = 1;
+
+	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+	if (!mc) {
+		mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
+		if (!mc)
+			return -ENOMEM;
+
+		memcpy(mc->addr, mdb->addr, ETH_ALEN);
+		mc->vid = vid;
+
+		list_add_tail(&mc->list, &ocelot->multicast);
+		new = true;
+	}
+
+	memcpy(addr, mc->addr, ETH_ALEN);
+	addr[0] = 0;
+
+	if (!new) {
+		addr[2] = mc->ports << 0;
+		addr[1] = mc->ports >> 8;
+		ocelot_mact_forget(ocelot, addr, vid);
+	}
+
+	mc->ports |= BIT(port->chip_port);
+	addr[2] = mc->ports << 0;
+	addr[1] = mc->ports >> 8;
+
+	return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
+
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+				   const struct switchdev_obj_port_mdb *mdb)
+{
+	struct ocelot_port *port = netdev_priv(dev);
+	struct ocelot *ocelot = port->ocelot;
+	struct ocelot_multicast *mc;
+	unsigned char addr[ETH_ALEN];
+	u16 vid = mdb->vid;
+
+	if (!vid)
+		vid = 1;
+
+	mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
+	if (!mc)
+		return -ENOENT;
+
+	memcpy(addr, mc->addr, ETH_ALEN);
+	addr[2] = mc->ports << 0;
+	addr[1] = mc->ports >> 8;
+	addr[0] = 0;
+	ocelot_mact_forget(ocelot, addr, vid);
+
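+	/* Illustrative note (inferred from the code, stated as an
+	 * assumption): for ENTRYTYPE_MACv4 entries the destination is not a
+	 * single port index but a port mask folded into the address itself;
+	 * addr[0] is zeroed and the 16-bit member mask is stored in addr[1]
+	 * (high byte) and addr[2] (low byte). This is why the old entry is
+	 * forgotten above and re-learned below whenever the mask changes.
+	 */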
mc->ports &= ~BIT(port->chip_port);
+	if (!mc->ports) {
+		list_del(&mc->list);
+		devm_kfree(ocelot->dev, mc);
+		return 0;
+	}
+
+	addr[2] = mc->ports << 0;
+	addr[1] = mc->ports >> 8;
+
+	return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+}
+
+static int ocelot_port_obj_add(struct net_device *dev,
+			       const struct switchdev_obj *obj,
+			       struct switchdev_trans *trans)
+{
+	int ret = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_MDB:
+		ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+					      trans);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int ocelot_port_obj_del(struct net_device *dev,
+			       const struct switchdev_obj *obj)
+{
+	int ret = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_MDB:
+		ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static const struct switchdev_ops ocelot_port_switchdev_ops = {
+	.switchdev_port_attr_get = ocelot_port_attr_get,
+	.switchdev_port_attr_set = ocelot_port_attr_set,
+	.switchdev_port_obj_add = ocelot_port_obj_add,
+	.switchdev_port_obj_del = ocelot_port_obj_del,
+};
+
+static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
+				   struct net_device *bridge)
+{
+	struct ocelot *ocelot = ocelot_port->ocelot;
+
+	if (!ocelot->bridge_mask) {
+		ocelot->hw_bridge_dev = bridge;
+	} else {
+		if (ocelot->hw_bridge_dev != bridge)
+			/* This is adding the port to a second bridge, which
+			 * is unsupported.
+			 */
+			return -ENODEV;
+	}
+
+	ocelot->bridge_mask |= BIT(ocelot_port->chip_port);
+
+	return 0;
+}
+
+static void ocelot_port_bridge_leave(struct ocelot_port *ocelot_port,
+				     struct net_device *bridge)
+{
+	struct ocelot *ocelot = ocelot_port->ocelot;
+
+	ocelot->bridge_mask &= ~BIT(ocelot_port->chip_port);
+
+	if (!ocelot->bridge_mask)
+		ocelot->hw_bridge_dev = NULL;
+}
+
+/* Checks whether the net_device instance given to us originates from our
+ * driver.
+ */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+	return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+static int ocelot_netdevice_port_event(struct net_device *dev,
+				       unsigned long event,
+				       struct netdev_notifier_changeupper_info *info)
+{
+	struct ocelot_port *ocelot_port = netdev_priv(dev);
+	int err = 0;
+
+	if (!ocelot_netdevice_dev_check(dev))
+		return 0;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		if (netif_is_bridge_master(info->upper_dev)) {
+			if (info->linking)
+				err = ocelot_port_bridge_join(ocelot_port,
+							      info->upper_dev);
+			else
+				ocelot_port_bridge_leave(ocelot_port,
+							 info->upper_dev);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int ocelot_netdevice_event(struct notifier_block *unused,
+				  unsigned long event, void *ptr)
+{
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	int ret = 0;
+
+	if (netif_is_lag_master(dev)) {
+		struct net_device *slave;
+		struct list_head *iter;
+
+		netdev_for_each_lower_dev(dev, slave, iter) {
+			ret = ocelot_netdevice_port_event(slave, event, info);
+			if (ret)
+				goto notify;
+		}
+	} else {
+		ret = ocelot_netdevice_port_event(dev, event, info);
+	}
+
+notify:
+	return notifier_from_errno(ret);
+}
+
+struct notifier_block ocelot_netdevice_nb __read_mostly = {
+	.notifier_call = ocelot_netdevice_event,
+};
+EXPORT_SYMBOL(ocelot_netdevice_nb);
+
+int ocelot_probe_port(struct ocelot *ocelot, u8 port,
+		      void __iomem *regs,
+		      struct phy_device *phy)
+{
+	struct ocelot_port *ocelot_port;
+	struct net_device *dev;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct ocelot_port));
+	if (!dev)
+		return -ENOMEM;
+	SET_NETDEV_DEV(dev, ocelot->dev);
+	ocelot_port = netdev_priv(dev);
+	ocelot_port->dev = dev;
+	ocelot_port->ocelot = ocelot;
+	ocelot_port->regs = regs;
+	ocelot_port->chip_port = port;
+	ocelot_port->phy = phy;
+	INIT_LIST_HEAD(&ocelot_port->mc);
+	ocelot->ports[port] = ocelot_port;
+
+	dev->netdev_ops = &ocelot_port_netdev_ops;
+	dev->ethtool_ops = &ocelot_ethtool_ops;
+	dev->switchdev_ops = &ocelot_port_switchdev_ops;
+
+	memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
+	dev->dev_addr[ETH_ALEN - 1] += port;
+	ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
+			  ENTRYTYPE_LOCKED);
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(ocelot->dev, "register_netdev failed\n");
+		goto err_register_netdev;
+	}
+
+	return 0;
+
+err_register_netdev:
+	free_netdev(dev);
+	return err;
+}
+EXPORT_SYMBOL(ocelot_probe_port);
+
+int ocelot_init(struct ocelot *ocelot)
+{
+	u32 port;
+	int i, cpu = ocelot->num_phys_ports;
+	char queue_name[32];
+
+	ocelot->stats = devm_kcalloc(ocelot->dev,
+				     ocelot->num_phys_ports * ocelot->num_stats,
+				     sizeof(u64), GFP_KERNEL);
+	if (!ocelot->stats)
+		return -ENOMEM;
+
+	mutex_init(&ocelot->stats_lock);
+	snprintf(queue_name, sizeof(queue_name), "%s-stats",
+		 dev_name(ocelot->dev));
+	ocelot->stats_queue = create_singlethread_workqueue(queue_name);
+	if (!ocelot->stats_queue)
+		return -ENOMEM;
+
+	ocelot_mact_init(ocelot);
+	ocelot_vlan_init(ocelot);
+
+	for (port = 0; port < ocelot->num_phys_ports; port++) {
+		/* Clear all counters (5 groups) */
+		ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
+			     SYS_STAT_CFG_STAT_CLEAR_SHOT(0x7f),
+			     SYS_STAT_CFG);
+	}
+
+	/* Only use S-Tag */
+	ocelot_write(ocelot, ETH_P_8021AD, SYS_VLAN_ETYPE_CFG);
+
+	/* Aggregation mode */
+	ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
+		     ANA_AGGR_CFG_AC_DMAC_ENA |
+		     ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+
+	/* Set MAC age time to default value. The entry is aged after
+	 * 2 * AGE_PERIOD.
+	 */
+	ocelot_write(ocelot,
+		     ANA_AUTOAGE_AGE_PERIOD(BR_DEFAULT_AGEING_TIME / 2 / HZ),
+		     ANA_AUTOAGE);
+
+	/* Disable learning for frames discarded by VLAN ingress filtering */
+	regmap_field_write(ocelot->regfields[ANA_ADVLEARN_VLAN_CHK], 1);
+
+	/* Set up frame ageing to a fixed value of 2 s, expressed in 6.5 us
+	 * units (2,000,000 us / 6.5 us = 307692).
+	 */
+	ocelot_write(ocelot, SYS_FRM_AGING_AGE_TX_ENA |
+		     SYS_FRM_AGING_MAX_AGE(307692), SYS_FRM_AGING);
+
+	/* Setup flooding PGIDs */
+	ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
+			 ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+			 ANA_FLOODING_FLD_UNICAST(PGID_UC),
+			 ANA_FLOODING, 0);
+	ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
+		     ANA_FLOODING_IPMC_FLD_MC6_CTRL(PGID_MC) |
+		     ANA_FLOODING_IPMC_FLD_MC4_DATA(PGID_MCIPV4) |
+		     ANA_FLOODING_IPMC_FLD_MC4_CTRL(PGID_MC),
+		     ANA_FLOODING_IPMC);
+
+	for (port = 0; port < ocelot->num_phys_ports; port++) {
+		/* Transmit the frame to the local port. */
+		ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
+		/* Do not forward BPDU frames to the front ports. */
+		ocelot_write_gix(ocelot,
+				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
+				 ANA_PORT_CPU_FWD_BPDU_CFG,
+				 port);
+		/* Ensure bridging is disabled */
+		ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
+	}
+
+	/* Configure and enable the CPU port. */
+	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
+	ocelot_write_rix(ocelot, BIT(cpu), ANA_PGID_PGID, PGID_CPU);
+	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_RECV_ENA |
+			 ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
+			 ANA_PORT_PORT_CFG, cpu);
+
+	/* Allow broadcast MAC frames. */
+	for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
+		u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
+
+		ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+	}
+	ocelot_write_rix(ocelot,
+			 ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
+			 ANA_PGID_PGID, PGID_MC);
+	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
+	ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
+
+	/* CPU port Injection/Extraction configuration */
+	ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
+			 QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
+			 QSYS_SWITCH_PORT_MODE_PORT_ENA,
+			 QSYS_SWITCH_PORT_MODE, cpu);
+	ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(1) |
+			 SYS_PORT_MODE_INCL_INJ_HDR(1), SYS_PORT_MODE, cpu);
+	/* Allow manual injection via DEVCPU_QS registers, and byte-swap
+	 * these registers' endianness.
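+	 * Illustrative note (an assumption about this manual mode, not a
+	 * documented guarantee): frame words are then written to and read
+	 * from the switch through the QS_INJ_WR and QS_XTR_RD registers,
+	 * one 32-bit word at a time.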
+ */ + ocelot_write_rix(ocelot, QS_INJ_GRP_CFG_BYTE_SWAP | + QS_INJ_GRP_CFG_MODE(1), QS_INJ_GRP_CFG, 0); + ocelot_write_rix(ocelot, QS_XTR_GRP_CFG_BYTE_SWAP | + QS_XTR_GRP_CFG_MODE(1), QS_XTR_GRP_CFG, 0); + ocelot_write(ocelot, ANA_CPUQ_CFG_CPUQ_MIRROR(2) | + ANA_CPUQ_CFG_CPUQ_LRN(2) | + ANA_CPUQ_CFG_CPUQ_MAC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_SRC_COPY(2) | + ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(2) | + ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(6) | + ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(6) | + ANA_CPUQ_CFG_CPUQ_IGMP(6) | + ANA_CPUQ_CFG_CPUQ_MLD(6), ANA_CPUQ_CFG); + for (i = 0; i < 16; i++) + ocelot_write_rix(ocelot, ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(6) | + ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), + ANA_CPUQ_8021_CFG, i); + + INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); + queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, + OCELOT_STATS_CHECK_DELAY); + return 0; +} +EXPORT_SYMBOL(ocelot_init); + +void ocelot_deinit(struct ocelot *ocelot) +{ + destroy_workqueue(ocelot->stats_queue); + mutex_destroy(&ocelot->stats_lock); +} +EXPORT_SYMBOL(ocelot_deinit); + +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h new file mode 100644 index 000000000000..097bd12a10d4 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -0,0 +1,572 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_H_ +#define _MSCC_OCELOT_H_ + +#include <linux/bitops.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "ocelot_ana.h" +#include "ocelot_dev.h" +#include "ocelot_hsio.h" +#include "ocelot_qsys.h" +#include "ocelot_rew.h" +#include "ocelot_sys.h" +#include "ocelot_qs.h" + +#define PGID_AGGR 64 +#define PGID_SRC 80 + +/* Reserved PGIDs */ +#define PGID_CPU (PGID_AGGR - 5) +#define PGID_UC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +#define OCELOT_BUFFER_CELL_SZ 60 + +#define OCELOT_STATS_CHECK_DELAY (2 * HZ) + +#define IFH_LEN 4 + +struct frame_info { + u32 len; + u16 port; + u16 vid; + u8 cpuq; + u8 tag_type; +}; + +#define IFH_INJ_BYPASS BIT(31) +#define IFH_INJ_POP_CNT_DISABLE (3 << 28) + +#define IFH_TAG_TYPE_C 0 +#define IFH_TAG_TYPE_S 1 + +#define OCELOT_SPEED_2500 0 +#define OCELOT_SPEED_1000 1 +#define OCELOT_SPEED_100 2 +#define OCELOT_SPEED_10 3 + +#define TARGET_OFFSET 24 +#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0) +#define REG(reg, offset) [reg & REG_MASK] = offset + +enum ocelot_target { + ANA = 1, + QS, + QSYS, + REW, + SYS, + HSIO, + TARGET_MAX, +}; + +enum ocelot_reg { + ANA_ADVLEARN = ANA << TARGET_OFFSET, + ANA_VLANMASK, + ANA_PORT_B_DOMAIN, + ANA_ANAGEFIL, + ANA_ANEVENTS, + ANA_STORMLIMIT_BURST, + ANA_STORMLIMIT_CFG, + ANA_ISOLATED_PORTS, + ANA_COMMUNITY_PORTS, + ANA_AUTOAGE, + ANA_MACTOPTIONS, + ANA_LEARNDISC, + ANA_AGENCTRL, + ANA_MIRRORPORTS, + ANA_EMIRRORPORTS, + ANA_FLOODING, + ANA_FLOODING_IPMC, + ANA_SFLOW_CFG, + ANA_PORT_MODE, + ANA_CUT_THRU_CFG, + ANA_PGID_PGID, + ANA_TABLES_ANMOVED, + ANA_TABLES_MACHDATA, + ANA_TABLES_MACLDATA, + ANA_TABLES_STREAMDATA, + ANA_TABLES_MACACCESS, + ANA_TABLES_MACTINDX, + ANA_TABLES_VLANACCESS, + ANA_TABLES_VLANTIDX, + ANA_TABLES_ISDXACCESS, + ANA_TABLES_ISDXTIDX, + ANA_TABLES_ENTRYLIM, + ANA_TABLES_PTP_ID_HIGH, + ANA_TABLES_PTP_ID_LOW, + ANA_TABLES_STREAMACCESS, + ANA_TABLES_STREAMTIDX, + ANA_TABLES_SEQ_HISTORY, + 
ANA_TABLES_SEQ_MASK, + ANA_TABLES_SFID_MASK, + ANA_TABLES_SFIDACCESS, + ANA_TABLES_SFIDTIDX, + ANA_MSTI_STATE, + ANA_OAM_UPM_LM_CNT, + ANA_SG_ACCESS_CTRL, + ANA_SG_CONFIG_REG_1, + ANA_SG_CONFIG_REG_2, + ANA_SG_CONFIG_REG_3, + ANA_SG_CONFIG_REG_4, + ANA_SG_CONFIG_REG_5, + ANA_SG_GCL_GS_CONFIG, + ANA_SG_GCL_TI_CONFIG, + ANA_SG_STATUS_REG_1, + ANA_SG_STATUS_REG_2, + ANA_SG_STATUS_REG_3, + ANA_PORT_VLAN_CFG, + ANA_PORT_DROP_CFG, + ANA_PORT_QOS_CFG, + ANA_PORT_VCAP_CFG, + ANA_PORT_VCAP_S1_KEY_CFG, + ANA_PORT_VCAP_S2_CFG, + ANA_PORT_PCP_DEI_MAP, + ANA_PORT_CPU_FWD_CFG, + ANA_PORT_CPU_FWD_BPDU_CFG, + ANA_PORT_CPU_FWD_GARP_CFG, + ANA_PORT_CPU_FWD_CCM_CFG, + ANA_PORT_PORT_CFG, + ANA_PORT_POL_CFG, + ANA_PORT_PTP_CFG, + ANA_PORT_PTP_DLY1_CFG, + ANA_PORT_PTP_DLY2_CFG, + ANA_PORT_SFID_CFG, + ANA_PFC_PFC_CFG, + ANA_PFC_PFC_TIMER, + ANA_IPT_OAM_MEP_CFG, + ANA_IPT_IPT, + ANA_PPT_PPT, + ANA_FID_MAP_FID_MAP, + ANA_AGGR_CFG, + ANA_CPUQ_CFG, + ANA_CPUQ_CFG2, + ANA_CPUQ_8021_CFG, + ANA_DSCP_CFG, + ANA_DSCP_REWR_CFG, + ANA_VCAP_RNG_TYPE_CFG, + ANA_VCAP_RNG_VAL_CFG, + ANA_VRAP_CFG, + ANA_VRAP_HDR_DATA, + ANA_VRAP_HDR_MASK, + ANA_DISCARD_CFG, + ANA_FID_CFG, + ANA_POL_PIR_CFG, + ANA_POL_CIR_CFG, + ANA_POL_MODE_CFG, + ANA_POL_PIR_STATE, + ANA_POL_CIR_STATE, + ANA_POL_STATE, + ANA_POL_FLOWC, + ANA_POL_HYST, + ANA_POL_MISC_CFG, + QS_XTR_GRP_CFG = QS << TARGET_OFFSET, + QS_XTR_RD, + QS_XTR_FRM_PRUNING, + QS_XTR_FLUSH, + QS_XTR_DATA_PRESENT, + QS_XTR_CFG, + QS_INJ_GRP_CFG, + QS_INJ_WR, + QS_INJ_CTRL, + QS_INJ_STATUS, + QS_INJ_ERR, + QS_INH_DBG, + QSYS_PORT_MODE = QSYS << TARGET_OFFSET, + QSYS_SWITCH_PORT_MODE, + QSYS_STAT_CNT_CFG, + QSYS_EEE_CFG, + QSYS_EEE_THRES, + QSYS_IGR_NO_SHARING, + QSYS_EGR_NO_SHARING, + QSYS_SW_STATUS, + QSYS_EXT_CPU_CFG, + QSYS_PAD_CFG, + QSYS_CPU_GROUP_MAP, + QSYS_QMAP, + QSYS_ISDX_SGRP, + QSYS_TIMED_FRAME_ENTRY, + QSYS_TFRM_MISC, + QSYS_TFRM_PORT_DLY, + QSYS_TFRM_TIMER_CFG_1, + QSYS_TFRM_TIMER_CFG_2, + QSYS_TFRM_TIMER_CFG_3, + QSYS_TFRM_TIMER_CFG_4, + QSYS_TFRM_TIMER_CFG_5, + QSYS_TFRM_TIMER_CFG_6, + QSYS_TFRM_TIMER_CFG_7, + QSYS_TFRM_TIMER_CFG_8, + QSYS_RED_PROFILE, + QSYS_RES_QOS_MODE, + QSYS_RES_CFG, + QSYS_RES_STAT, + QSYS_EGR_DROP_MODE, + QSYS_EQ_CTRL, + QSYS_EVENTS_CORE, + QSYS_QMAXSDU_CFG_0, + QSYS_QMAXSDU_CFG_1, + QSYS_QMAXSDU_CFG_2, + QSYS_QMAXSDU_CFG_3, + QSYS_QMAXSDU_CFG_4, + QSYS_QMAXSDU_CFG_5, + QSYS_QMAXSDU_CFG_6, + QSYS_QMAXSDU_CFG_7, + QSYS_PREEMPTION_CFG, + QSYS_CIR_CFG, + QSYS_EIR_CFG, + QSYS_SE_CFG, + QSYS_SE_DWRR_CFG, + QSYS_SE_CONNECT, + QSYS_SE_DLB_SENSE, + QSYS_CIR_STATE, + QSYS_EIR_STATE, + QSYS_SE_STATE, + QSYS_HSCH_MISC_CFG, + QSYS_TAG_CONFIG, + QSYS_TAS_PARAM_CFG_CTRL, + QSYS_PORT_MAX_SDU, + QSYS_PARAM_CFG_REG_1, + QSYS_PARAM_CFG_REG_2, + QSYS_PARAM_CFG_REG_3, + QSYS_PARAM_CFG_REG_4, + QSYS_PARAM_CFG_REG_5, + QSYS_GCL_CFG_REG_1, + QSYS_GCL_CFG_REG_2, + QSYS_PARAM_STATUS_REG_1, + QSYS_PARAM_STATUS_REG_2, + QSYS_PARAM_STATUS_REG_3, + QSYS_PARAM_STATUS_REG_4, + QSYS_PARAM_STATUS_REG_5, + QSYS_PARAM_STATUS_REG_6, + QSYS_PARAM_STATUS_REG_7, + QSYS_PARAM_STATUS_REG_8, + QSYS_PARAM_STATUS_REG_9, + QSYS_GCL_STATUS_REG_1, + QSYS_GCL_STATUS_REG_2, + REW_PORT_VLAN_CFG = REW << TARGET_OFFSET, + REW_TAG_CFG, + REW_PORT_CFG, + REW_DSCP_CFG, + REW_PCP_DEI_QOS_MAP_CFG, + REW_PTP_CFG, + REW_PTP_DLY1_CFG, + REW_RED_TAG_CFG, + REW_DSCP_REMAP_DP1_CFG, + REW_DSCP_REMAP_CFG, + REW_STAT_CFG, + REW_REW_STICKY, + REW_PPT, + SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET, + SYS_COUNT_RX_UNICAST, + SYS_COUNT_RX_MULTICAST, + SYS_COUNT_RX_BROADCAST, + SYS_COUNT_RX_SHORTS, + SYS_COUNT_RX_FRAGMENTS, 
+ SYS_COUNT_RX_JABBERS, + SYS_COUNT_RX_CRC_ALIGN_ERRS, + SYS_COUNT_RX_SYM_ERRS, + SYS_COUNT_RX_64, + SYS_COUNT_RX_65_127, + SYS_COUNT_RX_128_255, + SYS_COUNT_RX_256_1023, + SYS_COUNT_RX_1024_1526, + SYS_COUNT_RX_1527_MAX, + SYS_COUNT_RX_PAUSE, + SYS_COUNT_RX_CONTROL, + SYS_COUNT_RX_LONGS, + SYS_COUNT_RX_CLASSIFIED_DROPS, + SYS_COUNT_TX_OCTETS, + SYS_COUNT_TX_UNICAST, + SYS_COUNT_TX_MULTICAST, + SYS_COUNT_TX_BROADCAST, + SYS_COUNT_TX_COLLISION, + SYS_COUNT_TX_DROPS, + SYS_COUNT_TX_PAUSE, + SYS_COUNT_TX_64, + SYS_COUNT_TX_65_127, + SYS_COUNT_TX_128_511, + SYS_COUNT_TX_512_1023, + SYS_COUNT_TX_1024_1526, + SYS_COUNT_TX_1527_MAX, + SYS_COUNT_TX_AGING, + SYS_RESET_CFG, + SYS_SR_ETYPE_CFG, + SYS_VLAN_ETYPE_CFG, + SYS_PORT_MODE, + SYS_FRONT_PORT_MODE, + SYS_FRM_AGING, + SYS_STAT_CFG, + SYS_SW_STATUS, + SYS_MISC_CFG, + SYS_REW_MAC_HIGH_CFG, + SYS_REW_MAC_LOW_CFG, + SYS_TIMESTAMP_OFFSET, + SYS_CMID, + SYS_PAUSE_CFG, + SYS_PAUSE_TOT_CFG, + SYS_ATOP, + SYS_ATOP_TOT_CFG, + SYS_MAC_FC_CFG, + SYS_MMGT, + SYS_MMGT_FAST, + SYS_EVENTS_DIF, + SYS_EVENTS_CORE, + SYS_CNT, + SYS_PTP_STATUS, + SYS_PTP_TXSTAMP, + SYS_PTP_NXT, + SYS_PTP_CFG, + SYS_RAM_INIT, + SYS_CM_ADDR, + SYS_CM_DATA_WR, + SYS_CM_DATA_RD, + SYS_CM_OP, + SYS_CM_DATA, + HSIO_PLL5G_CFG0 = HSIO << TARGET_OFFSET, + HSIO_PLL5G_CFG1, + HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG3, + HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG5, + HSIO_PLL5G_CFG6, + HSIO_PLL5G_STATUS0, + HSIO_PLL5G_STATUS1, + HSIO_PLL5G_BIST_CFG0, + HSIO_PLL5G_BIST_CFG1, + HSIO_PLL5G_BIST_CFG2, + HSIO_PLL5G_BIST_STAT0, + HSIO_PLL5G_BIST_STAT1, + HSIO_RCOMP_CFG0, + HSIO_RCOMP_STATUS, + HSIO_SYNC_ETH_CFG, + HSIO_SYNC_ETH_PLL_CFG, + HSIO_S1G_DES_CFG, + HSIO_S1G_IB_CFG, + HSIO_S1G_OB_CFG, + HSIO_S1G_SER_CFG, + HSIO_S1G_COMMON_CFG, + HSIO_S1G_PLL_CFG, + HSIO_S1G_PLL_STATUS, + HSIO_S1G_DFT_CFG0, + HSIO_S1G_DFT_CFG1, + HSIO_S1G_DFT_CFG2, + HSIO_S1G_TP_CFG, + HSIO_S1G_RC_PLL_BIST_CFG, + HSIO_S1G_MISC_CFG, + HSIO_S1G_DFT_STATUS, + HSIO_S1G_MISC_STATUS, + HSIO_MCB_S1G_ADDR_CFG, + HSIO_S6G_DIG_CFG, + HSIO_S6G_DFT_CFG0, + HSIO_S6G_DFT_CFG1, + HSIO_S6G_DFT_CFG2, + HSIO_S6G_TP_CFG0, + HSIO_S6G_TP_CFG1, + HSIO_S6G_RC_PLL_BIST_CFG, + HSIO_S6G_MISC_CFG, + HSIO_S6G_OB_ANEG_CFG, + HSIO_S6G_DFT_STATUS, + HSIO_S6G_ERR_CNT, + HSIO_S6G_MISC_STATUS, + HSIO_S6G_DES_CFG, + HSIO_S6G_IB_CFG, + HSIO_S6G_IB_CFG1, + HSIO_S6G_IB_CFG2, + HSIO_S6G_IB_CFG3, + HSIO_S6G_IB_CFG4, + HSIO_S6G_IB_CFG5, + HSIO_S6G_OB_CFG, + HSIO_S6G_OB_CFG1, + HSIO_S6G_SER_CFG, + HSIO_S6G_COMMON_CFG, + HSIO_S6G_PLL_CFG, + HSIO_S6G_ACJTAG_CFG, + HSIO_S6G_GP_CFG, + HSIO_S6G_IB_STATUS0, + HSIO_S6G_IB_STATUS1, + HSIO_S6G_ACJTAG_STATUS, + HSIO_S6G_PLL_STATUS, + HSIO_S6G_REVID, + HSIO_MCB_S6G_ADDR_CFG, + HSIO_HW_CFG, + HSIO_HW_QSGMII_CFG, + HSIO_HW_QSGMII_STAT, + HSIO_CLK_CFG, + HSIO_TEMP_SENSOR_CTRL, + HSIO_TEMP_SENSOR_CFG, + HSIO_TEMP_SENSOR_STAT, +}; + +enum ocelot_regfield { + ANA_ADVLEARN_VLAN_CHK, + ANA_ADVLEARN_LEARN_MIRROR, + ANA_ANEVENTS_FLOOD_DISCARD, + ANA_ANEVENTS_MSTI_DROP, + ANA_ANEVENTS_ACLKILL, + ANA_ANEVENTS_ACLUSED, + ANA_ANEVENTS_AUTOAGE, + ANA_ANEVENTS_VS2TTL1, + ANA_ANEVENTS_STORM_DROP, + ANA_ANEVENTS_LEARN_DROP, + ANA_ANEVENTS_AGED_ENTRY, + ANA_ANEVENTS_CPU_LEARN_FAILED, + ANA_ANEVENTS_AUTO_LEARN_FAILED, + ANA_ANEVENTS_LEARN_REMOVE, + ANA_ANEVENTS_AUTO_LEARNED, + ANA_ANEVENTS_AUTO_MOVED, + ANA_ANEVENTS_DROPPED, + ANA_ANEVENTS_CLASSIFIED_DROP, + ANA_ANEVENTS_CLASSIFIED_COPY, + ANA_ANEVENTS_VLAN_DISCARD, + ANA_ANEVENTS_FWD_DISCARD, + ANA_ANEVENTS_MULTICAST_FLOOD, + ANA_ANEVENTS_UNICAST_FLOOD, + ANA_ANEVENTS_DEST_KNOWN, + ANA_ANEVENTS_BUCKET3_MATCH, 
+ ANA_ANEVENTS_BUCKET2_MATCH, + ANA_ANEVENTS_BUCKET1_MATCH, + ANA_ANEVENTS_BUCKET0_MATCH, + ANA_ANEVENTS_CPU_OPERATION, + ANA_ANEVENTS_DMAC_LOOKUP, + ANA_ANEVENTS_SMAC_LOOKUP, + ANA_ANEVENTS_SEQ_GEN_ERR_0, + ANA_ANEVENTS_SEQ_GEN_ERR_1, + ANA_TABLES_MACACCESS_B_DOM, + ANA_TABLES_MACTINDX_BUCKET, + ANA_TABLES_MACTINDX_M_INDEX, + QSYS_TIMED_FRAME_ENTRY_TFRM_VLD, + QSYS_TIMED_FRAME_ENTRY_TFRM_FP, + QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO, + QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL, + QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T, + SYS_RESET_CFG_CORE_ENA, + SYS_RESET_CFG_MEM_ENA, + SYS_RESET_CFG_MEM_INIT, + REGFIELD_MAX +}; + +struct ocelot_multicast { + struct list_head list; + unsigned char addr[ETH_ALEN]; + u16 vid; + u16 ports; +}; + +struct ocelot_port; + +struct ocelot_stat_layout { + u32 offset; + char name[ETH_GSTRING_LEN]; +}; + +struct ocelot { + struct device *dev; + + struct regmap *targets[TARGET_MAX]; + struct regmap_field *regfields[REGFIELD_MAX]; + const u32 *const *map; + const struct ocelot_stat_layout *stats_layout; + unsigned int num_stats; + + u8 base_mac[ETH_ALEN]; + + struct net_device *hw_bridge_dev; + u16 bridge_mask; + u16 bridge_fwd_mask; + + struct workqueue_struct *ocelot_owq; + + int shared_queue_sz; + + u8 num_phys_ports; + u8 num_cpu_ports; + struct ocelot_port **ports; + + u16 lags[16]; + + /* Keep track of the vlan port masks */ + u32 vlan_mask[VLAN_N_VID]; + + struct list_head multicast; + + /* Workqueue to check statistics for overflow with its lock */ + struct mutex stats_lock; + u64 *stats; + struct delayed_work stats_work; + struct workqueue_struct *stats_queue; +}; + +struct ocelot_port { + struct net_device *dev; + struct ocelot *ocelot; + struct phy_device *phy; + void __iomem *regs; + u8 chip_port; + /* Keep a track of the mc addresses added to the mac table, so that they + * can be removed when needed. 
+ */ + struct list_head mc; + + /* Ingress default VLAN (pvid) */ + u16 pvid; + + /* Egress default VLAN (vid) */ + u16 vid; + + u8 vlan_aware; + + u64 *stats; +}; + +u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); +#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) +#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) +#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0) + +void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset); +#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi)) +#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) +#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) + +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, + u32 offset); +#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) +#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) +#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) + +u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); +void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); + +int ocelot_regfields_init(struct ocelot *ocelot, + const struct reg_field *const regfields); +struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, + struct platform_device *pdev, + const char *name); + +#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) + +int ocelot_init(struct ocelot *ocelot); +void ocelot_deinit(struct ocelot *ocelot); +int ocelot_chip_init(struct ocelot *ocelot); +int ocelot_probe_port(struct ocelot *ocelot, u8 port, + void __iomem *regs, + struct phy_device *phy); + +extern struct notifier_block ocelot_netdevice_nb; + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_ana.h b/drivers/net/ethernet/mscc/ocelot_ana.h new file mode 100644 index 000000000000..841c6ec22b64 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_ana.h @@ -0,0 +1,625 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_ANA_H_ +#define _MSCC_OCELOT_ANA_H_ + +#define ANA_ANAGEFIL_B_DOM_EN BIT(22) +#define ANA_ANAGEFIL_B_DOM_VAL BIT(21) +#define ANA_ANAGEFIL_AGE_LOCKED BIT(20) +#define ANA_ANAGEFIL_PID_EN BIT(19) +#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14)) +#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14) +#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14) +#define ANA_ANAGEFIL_VID_EN BIT(13) +#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0)) +#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0) + +#define ANA_STORMLIMIT_CFG_RSZ 0x4 + +#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3)) +#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3) +#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & 
GENMASK(6, 3)) >> 3) +#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2) +#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0)) +#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0) + +#define ANA_AUTOAGE_AGE_FAST BIT(21) +#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1)) +#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1) +#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0) + +#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1) +#define ANA_MACTOPTIONS_SHADOW BIT(0) + +#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12)) +#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12) +#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12) +#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11) +#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10) +#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9) +#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8) +#define ANA_AGENCTRL_MIRROR_CPU BIT(7) +#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6) +#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5) +#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4) +#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3) +#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2) +#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1) +#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0) + +#define ANA_FLOODING_RSZ 0x4 + +#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12)) +#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0)) +#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18)) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12)) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0)) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0) + +#define ANA_SFLOW_CFG_RSZ 0x4 + +#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2)) +#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2) +#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2) +#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1) +#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0) + +#define ANA_PORT_MODE_RSZ 0x4 + +#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3) +#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1)) +#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1) +#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0) + +#define ANA_CUT_THRU_CFG_RSZ 0x4 + +#define ANA_PGID_PGID_RSZ 0x4 + +#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0)) +#define ANA_PGID_PGID_PGID_M GENMASK(11, 0) +#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27)) +#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27) +#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27) + +#define 
ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16)) +#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16) +#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16) +#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0)) +#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0) + +#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16) +#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9)) +#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9) +#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9) +#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8) +#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0) + +#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15) +#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14) +#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13) +#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12) +#define ANA_TABLES_MACACCESS_VALID BIT(11) +#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9)) +#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9) +#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3)) +#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3) +#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3) +#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0)) +#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0) +#define MACACCESS_CMD_IDLE 0 +#define MACACCESS_CMD_LEARN 1 +#define MACACCESS_CMD_FORGET 2 +#define MACACCESS_CMD_AGE 3 +#define MACACCESS_CMD_GET_NEXT 4 +#define MACACCESS_CMD_INIT 5 +#define MACACCESS_CMD_READ 6 +#define MACACCESS_CMD_WRITE 7 + +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2)) +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2) +#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2) +#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0) +#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0 +#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2 +#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3 + +#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17) +#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16) +#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15) +#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14) +#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13) +#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12) +#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0)) +#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0) + +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2)) +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2) +#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2) +#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21)) +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21) +#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15)) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15) +#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15) +#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14) +#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10) +#define 
ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0) + +#define ANA_TABLES_ENTRYLIM_RSZ 0x4 + +#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14)) +#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14) +#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14) +#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0)) +#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0) + +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4)) +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4) +#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4) +#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3) +#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2) +#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30)) +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30) +#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30) +#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16)) +#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16) +#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16) +#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8)) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8) +#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8) +#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7) +#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6) +#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5) +#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0)) +#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0) + +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16)) +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16) +#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16) +#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0)) +#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0) + +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1)) +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1) +#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0) + +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19)) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19) +#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2)) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2) +#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2) +#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0)) +#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0) + +#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26) +#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18)) +#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18) +#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18) +#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17) +#define 
ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8)) +#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8) +#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8) +#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0)) +#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0) + +#define ANA_MSTI_STATE_RSZ 0x4 + +#define ANA_OAM_UPM_LM_CNT_RSZ 0x4 + +#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0)) +#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0) +#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28) + +#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16)) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16) +#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20) +#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24)) +#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24) +#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24) +#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28) + +#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4 + +#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0)) +#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0) +#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4) + +#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4 + +#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0) +#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16) +#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20)) +#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20) +#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24) + +#define ANA_PORT_VLAN_CFG_GSZ 0x100 + +#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21) +#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18)) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18) +#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18) +#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17) +#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16) +#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15) +#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12)) +#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12) +#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0)) +#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0) + +#define ANA_PORT_DROP_CFG_GSZ 0x100 + +#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5) +#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4) +#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1) +#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) + +#define ANA_PORT_QOS_CFG_GSZ 0x100 + +#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5)) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5) +#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4) +#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3) +#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2) +#define 
ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0) + +#define ANA_PORT_VCAP_CFG_GSZ 0x100 + +#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11)) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11) +#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8)) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8) +#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0)) +#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0) + +#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100 +#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4 + +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0) + +#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100 + +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17)) +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17) +#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15)) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15) +#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15) +#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12)) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12) +#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10)) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10) +#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6) +#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2)) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2) +#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2) +#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0)) +#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0) + +#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100 +#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4 + +#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3) +#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0)) +#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0) + +#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100 + +#define 
ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7) +#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6) +#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5) +#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4) +#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3) +#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2) +#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1) +#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0) + +#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100 + +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0)) +#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0) + +#define ANA_PORT_PORT_CFG_GSZ 0x100 + +#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15) +#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14) +#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13) +#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12) +#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11) +#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10) +#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9) +#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8) +#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7) +#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6) +#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2)) +#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2) +#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2) +#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1) +#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0) + +#define ANA_PORT_POL_CFG_GSZ 0x100 + +#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19) +#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18) +#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9)) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9) +#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9) +#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0)) +#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0) + +#define ANA_PORT_PTP_CFG_GSZ 0x100 + +#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0) + +#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100 + +#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100 + +#define ANA_PORT_SFID_CFG_GSZ 0x100 +#define ANA_PORT_SFID_CFG_RSZ 0x4 + +#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8) +#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0)) +#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0) + +#define ANA_PFC_PFC_CFG_GSZ 0x40 + +#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2)) +#define 
ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2) +#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2) +#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0) + +#define ANA_PFC_PFC_TIMER_GSZ 0x40 +#define ANA_PFC_PFC_TIMER_RSZ 0x4 + +#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8 + +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6)) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1)) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1) +#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0) + +#define ANA_IPT_IPT_GSZ 0x8 + +#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15)) +#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15) +#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15) +#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7)) +#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7) +#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7) +#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0)) +#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0) + +#define ANA_PPT_PPT_RSZ 0x4 + +#define ANA_FID_MAP_FID_MAP_RSZ 0x4 + +#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6)) +#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6) +#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0)) +#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0) + +#define ANA_AGGR_CFG_AC_RND_ENA BIT(7) +#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6) +#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1) +#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0) + +#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27)) +#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27) +#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27) +#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24)) +#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24) +#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21)) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21) +#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18)) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18) +#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15)) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15) +#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12)) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12) +#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9)) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9) +#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9) +#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6)) +#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6) +#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & 
GENMASK(8, 6)) >> 6) +#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3)) +#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3) +#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0)) +#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0) + +#define ANA_CPUQ_8021_CFG_RSZ 0x4 + +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6)) +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6) +#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3)) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3) +#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0)) +#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0) + +#define ANA_DSCP_CFG_RSZ 0x4 + +#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11) +#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8)) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8) +#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2)) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2) +#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2) +#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1) +#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0) + +#define ANA_DSCP_REWR_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4 + +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16)) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0)) +#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0) + +#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12) +#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0)) +#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0) + +#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3) +#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2) +#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1) +#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0) + +#define ANA_FID_CFG_VID_MC_ENA BIT(0) + +#define ANA_POL_PIR_CFG_GSZ 0x20 + +#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6) +#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0)) +#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0) + +#define ANA_POL_CIR_CFG_GSZ 0x20 + +#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6) +#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0)) +#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0) + +#define ANA_POL_MODE_CFG_GSZ 0x20 + +#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5)) +#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5) +#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3)) +#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3) +#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2) +#define ANA_POL_MODE_CFG_CIR_ENA BIT(1) +#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0) 
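The ANA_POL_PIR_CFG and ANA_POL_CIR_CFG macros above follow the three-macro convention used throughout these headers for multi-bit register fields: FIELD(x) shifts and masks a value into position, FIELD_M is the bare mask, and FIELD_X(x) extracts the field back out of a register word (the lowest field in a register, such as PIR_BURST, gets no _X variant because no shift is needed). A minimal sketch of composing and decoding a policer word with them; the rate and burst numbers are arbitrary illustration values, not taken from the patch, and the included header name is assumed from the mainline layout of this driver:

#include <linux/types.h>
#include <linux/printk.h>
#include "ocelot_ana.h"	/* assumed home of the ANA_POL_* macros above */

static void ocelot_policer_word_sketch(void)
{
	u32 val, rate, burst;

	/* Pack a peak rate and a burst size into one ANA_POL_PIR_CFG word */
	val = ANA_POL_PIR_CFG_PIR_RATE(0x1000) | ANA_POL_PIR_CFG_PIR_BURST(0x20);

	/* Rewrite only the rate field, leaving the burst bits untouched */
	val &= ~ANA_POL_PIR_CFG_PIR_RATE_M;
	val |= ANA_POL_PIR_CFG_PIR_RATE(0x800);

	/* Decode the fields back out of the word */
	rate = ANA_POL_PIR_CFG_PIR_RATE_X(val);		/* 0x800 */
	burst = val & ANA_POL_PIR_CFG_PIR_BURST_M;	/* 0x20; field starts at bit 0 */

	pr_debug("pir rate %u burst %u (word 0x%08x)\n", rate, burst, val);
}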
+ +#define ANA_POL_PIR_STATE_GSZ 0x20 + +#define ANA_POL_CIR_STATE_GSZ 0x20 + +#define ANA_POL_STATE_GSZ 0x20 + +#define ANA_POL_FLOWC_RSZ 0x4 + +#define ANA_POL_FLOWC_POL_FLOWC BIT(0) + +#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4)) +#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4) +#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4) +#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0)) +#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0) + +#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1) +#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c new file mode 100644 index 000000000000..18df7d934e81 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/skbuff.h> + +#include "ocelot.h" + +static int ocelot_parse_ifh(u32 *ifh, struct frame_info *info) +{ + int i; + u8 llen, wlen; + + /* The IFH is in network order, switch to CPU order */ + for (i = 0; i < IFH_LEN; i++) + ifh[i] = ntohl((__force __be32)ifh[i]); + + wlen = (ifh[1] >> 7) & 0xff; + llen = (ifh[1] >> 15) & 0x3f; + info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80; + + info->port = (ifh[2] & GENMASK(14, 11)) >> 11; + + info->cpuq = (ifh[3] & GENMASK(27, 20)) >> 20; + info->tag_type = (ifh[3] & GENMASK(16, 16)) >> 16; + info->vid = ifh[3] & GENMASK(11, 0); + + return 0; +} + +static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh, + u32 *rval) +{ + u32 val; + u32 bytes_valid; + + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_NOT_READY) { + if (ifh) + return -EIO; + + do { + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + } while (val == XTR_NOT_READY); + } + + switch (val) { + case XTR_ABORT: + return -EIO; + case XTR_EOF_0: + case XTR_EOF_1: + case XTR_EOF_2: + case XTR_EOF_3: + case XTR_PRUNED: + bytes_valid = XTR_VALID_BYTES(val); + val = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + if (val == XTR_ESCAPE) + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + else + *rval = val; + + return bytes_valid; + case XTR_ESCAPE: + *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return 4; + default: + *rval = val; + + return 4; + } +} + +static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) +{ + struct ocelot *ocelot = arg; + int i = 0, grp = 0; + int err = 0; + + if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))) + return IRQ_NONE; + + do { + struct sk_buff *skb; + struct net_device *dev; + u32 *buf; + int sz, len; + u32 ifh[4]; + u32 val; + struct frame_info info; + + for (i = 0; i < IFH_LEN; i++) { + err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]); + if (err != 4) + break; + } + + if (err != 4) + break; + + ocelot_parse_ifh(ifh, &info); + + dev = ocelot->ports[info.port]->dev; + + skb = netdev_alloc_skb(dev, info.len); + + if (unlikely(!skb)) { + netdev_err(dev, "Unable to allocate sk_buff\n"); + err = -ENOMEM; + break; + } + buf = (u32 *)skb_put(skb, info.len); + + len = 0; + do { + sz = ocelot_rx_frame_word(ocelot, grp, false, &val); + *buf++ = val; + len += sz; + } while ((sz == 4) && (len < info.len)); + + if (sz < 0) { + err = sz; + break; + } + + /* Everything we see on an interface that is in the HW bridge + * 
has already been forwarded. + */ + if (ocelot->bridge_mask & BIT(info.port)) + skb->offload_fwd_mark = 1; + + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->stats.rx_bytes += len; + dev->stats.rx_packets++; + } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)); + + if (err) + while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) + ocelot_read_rix(ocelot, QS_XTR_RD, grp); + + return IRQ_HANDLED; +} + +static const struct of_device_id mscc_ocelot_match[] = { + { .compatible = "mscc,vsc7514-switch" }, + { } +}; +MODULE_DEVICE_TABLE(of, mscc_ocelot_match); + +static int mscc_ocelot_probe(struct platform_device *pdev) +{ + int err, irq; + unsigned int i; + struct device_node *np = pdev->dev.of_node; + struct device_node *ports, *portnp; + struct ocelot *ocelot; + u32 val; + + struct { + enum ocelot_target id; + char *name; + } res[] = { + { SYS, "sys" }, + { REW, "rew" }, + { QSYS, "qsys" }, + { ANA, "ana" }, + { QS, "qs" }, + { HSIO, "hsio" }, + }; + + if (!np && !pdev->dev.platform_data) + return -ENODEV; + + ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL); + if (!ocelot) + return -ENOMEM; + + platform_set_drvdata(pdev, ocelot); + ocelot->dev = &pdev->dev; + + for (i = 0; i < ARRAY_SIZE(res); i++) { + struct regmap *target; + + target = ocelot_io_platform_init(ocelot, pdev, res[i].name); + if (IS_ERR(target)) + return PTR_ERR(target); + + ocelot->targets[res[i].id] = target; + } + + err = ocelot_chip_init(ocelot); + if (err) + return err; + + irq = platform_get_irq_byname(pdev, "xtr"); + if (irq < 0) + return -ENODEV; + + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, + ocelot_xtr_irq_handler, IRQF_ONESHOT, + "frame extraction", ocelot); + if (err) + return err; + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + + do { + msleep(1); + regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], + &val); + } while (val); + + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1); + regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1); + + ocelot->num_cpu_ports = 1; /* 1 port on the switch, two groups */ + + ports = of_get_child_by_name(np, "ethernet-ports"); + if (!ports) { + dev_err(&pdev->dev, "no ethernet-ports child node found\n"); + return -ENODEV; + } + + ocelot->num_phys_ports = of_get_child_count(ports); + + ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + + INIT_LIST_HEAD(&ocelot->multicast); + ocelot_init(ocelot); + + ocelot_rmw(ocelot, HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_6_MODE | + HSIO_HW_CFG_DEV1G_9_MODE, + HSIO_HW_CFG_DEV1G_4_MODE | + HSIO_HW_CFG_DEV1G_6_MODE | + HSIO_HW_CFG_DEV1G_9_MODE, + HSIO_HW_CFG); + + for_each_available_child_of_node(ports, portnp) { + struct device_node *phy_node; + struct phy_device *phy; + struct resource *res; + void __iomem *regs; + char res_name[8]; + u32 port; + + if (of_property_read_u32(portnp, "reg", &port)) + continue; + + snprintf(res_name, sizeof(res_name), "port%d", port); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + continue; + + phy_node = of_parse_phandle(portnp, "phy-handle", 0); + if (!phy_node) + continue; + + phy = of_phy_find_device(phy_node); + if (!phy) + continue; + + err = ocelot_probe_port(ocelot, port, regs, phy); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto 
err_probe_ports; + } + } + + register_netdevice_notifier(&ocelot_netdevice_nb); + + dev_info(&pdev->dev, "Ocelot switch probed\n"); + + return 0; + +err_probe_ports: + return err; +} + +static int mscc_ocelot_remove(struct platform_device *pdev) +{ + struct ocelot *ocelot = platform_get_drvdata(pdev); + + ocelot_deinit(ocelot); + unregister_netdevice_notifier(&ocelot_netdevice_nb); + + return 0; +} + +static struct platform_driver mscc_ocelot_driver = { + .probe = mscc_ocelot_probe, + .remove = mscc_ocelot_remove, + .driver = { + .name = "ocelot-switch", + .of_match_table = mscc_ocelot_match, + }, +}; + +module_platform_driver(mscc_ocelot_driver); + +MODULE_DESCRIPTION("Microsemi Ocelot switch driver"); +MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot_dev.h b/drivers/net/ethernet/mscc/ocelot_dev.h new file mode 100644 index 000000000000..0a50d53bbd3f --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_dev.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_DEV_H_ +#define _MSCC_OCELOT_DEV_H_ + +#define DEV_CLOCK_CFG 0x0 + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_CLOCK_CFG_PHY_RST BIT(2) +#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) + +#define DEV_PORT_MISC 0x4 + +#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4) +#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3) +#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2) +#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1) +#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0) + +#define DEV_EVENTS 0x8 + +#define DEV_EEE_CFG 0xc + +#define DEV_EEE_CFG_EEE_ENA BIT(22) +#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) +#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) +#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) +#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) +#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define DEV_EEE_CFG_PORT_LPI BIT(0) + +#define DEV_RX_PATH_DELAY 0x10 + +#define DEV_TX_PATH_DELAY 0x14 + +#define DEV_PTP_PREDICT_CFG 0x18 + +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4)) +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4) +#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4) +#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0)) +#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0) + +#define DEV_MAC_ENA_CFG 0x1c + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) + +#define DEV_MAC_MODE_CFG 0x20 + +#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0) + +#define DEV_MAC_MAXLEN_CFG 0x24 + +#define DEV_MAC_TAGS_CFG 0x28 + +#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) +#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) 
+#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) +#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) + +#define DEV_MAC_ADV_CHK_CFG 0x2c + +#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) + +#define DEV_MAC_IFG_CFG 0x30 + +#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) +#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) +#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) +#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) +#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) + +#define DEV_MAC_HDX_CFG 0x34 + +#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV_MAC_HDX_CFG_OB_ENA BIT(25) +#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24) +#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) +#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) +#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) + +#define DEV_MAC_DBG_CFG 0x38 + +#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4) +#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) + +#define DEV_MAC_FC_MAC_LOW_CFG 0x3c + +#define DEV_MAC_FC_MAC_HIGH_CFG 0x40 + +#define DEV_MAC_STICKY 0x44 + +#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) +#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) +#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) +#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) +#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5) +#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) +#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3) +#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) +#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) +#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0) + +#define PCS1G_CFG 0x48 + +#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4) +#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) +#define PCS1G_CFG_PCS_ENA BIT(0) + +#define PCS1G_MODE_CFG 0x4c + +#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) +#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) + +#define PCS1G_SD_CFG 0x50 + +#define PCS1G_SD_CFG_SD_SEL BIT(8) +#define PCS1G_SD_CFG_SD_POL BIT(4) +#define PCS1G_SD_CFG_SD_ENA BIT(0) + +#define PCS1G_ANEG_CFG 0x54 + +#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16) +#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8) +#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) +#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0) + +#define PCS1G_ANEG_NP_CFG 0x58 + +#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16) +#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0) + +#define PCS1G_LB_CFG 0x5c + +#define PCS1G_LB_CFG_RA_ENA BIT(4) +#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) +#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) + +#define PCS1G_DBG_CFG 0x60 + +#define PCS1G_DBG_CFG_UDLT BIT(0) + +#define PCS1G_CDET_CFG 0x64 + +#define 
PCS1G_CDET_CFG_CDET_ENA BIT(0) + +#define PCS1G_ANEG_STATUS 0x68 + +#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) +#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16) +#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define PCS1G_ANEG_STATUS_PR BIT(4) +#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) +#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) + +#define PCS1G_ANEG_NP_STATUS 0x6c + +#define PCS1G_LINK_STATUS 0x70 + +#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12)) +#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12) +#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12) +#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8) +#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4) +#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) + +#define PCS1G_LINK_DOWN_CNT 0x74 + +#define PCS1G_STICKY 0x78 + +#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) +#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) + +#define PCS1G_DEBUG_STATUS 0x7c + +#define PCS1G_LPI_CFG 0x80 + +#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20) +#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17) +#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16) +#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4)) +#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4) +#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0) + +#define PCS1G_LPI_WAKE_ERROR_CNT 0x84 + +#define PCS1G_LPI_STATUS 0x88 + +#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16) +#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12) +#define PCS1G_LPI_STATUS_RX_QUIET BIT(9) +#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8) +#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4) +#define PCS1G_LPI_STATUS_TX_QUIET BIT(1) +#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0) + +#define PCS1G_TSTPAT_MODE_CFG 0x8c + +#define PCS1G_TSTPAT_STATUS 0x90 + +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8)) +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8) +#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4) +#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0) + +#define DEV_PCS_FX100_CFG 0x94 + +#define DEV_PCS_FX100_CFG_SD_SEL BIT(26) +#define DEV_PCS_FX100_CFG_SD_POL BIT(25) +#define DEV_PCS_FX100_CFG_SD_ENA BIT(24) +#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20) +#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16) +#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12)) +#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12) +#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12) +#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9)) +#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9) +#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4) +#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3) +#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2) +#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1) +#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0) + +#define DEV_PCS_FX100_STATUS 0x98 + +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8)) +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8) +#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8) 
+#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7) +#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6) +#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5) +#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4) +#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2) +#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1) +#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_dev_gmii.h b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h new file mode 100644 index 000000000000..6aa40ea223a2 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_dev_gmii.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_DEV_GMII_H_ +#define _MSCC_OCELOT_DEV_GMII_H_ + +#define DEV_GMII_PORT_MODE_CLOCK_CFG 0x0 + +#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_TX_RST BIT(5) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_MAC_RX_RST BIT(4) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_PORT_RST BIT(3) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_PHY_RST BIT(2) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) +#define DEV_GMII_PORT_MODE_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) + +#define DEV_GMII_PORT_MODE_PORT_MISC 0x4 + +#define DEV_GMII_PORT_MODE_PORT_MISC_MPLS_RX_ENA BIT(5) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_ERROR_ENA BIT(4) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_PAUSE_ENA BIT(3) +#define DEV_GMII_PORT_MODE_PORT_MISC_FWD_CTRL_ENA BIT(2) +#define DEV_GMII_PORT_MODE_PORT_MISC_GMII_LOOP_ENA BIT(1) +#define DEV_GMII_PORT_MODE_PORT_MISC_DEV_LOOP_ENA BIT(0) + +#define DEV_GMII_PORT_MODE_EVENTS 0x8 + +#define DEV_GMII_PORT_MODE_EEE_CFG 0xc + +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_ENA BIT(22) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1)) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1) +#define DEV_GMII_PORT_MODE_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) +#define DEV_GMII_PORT_MODE_EEE_CFG_PORT_LPI BIT(0) + +#define DEV_GMII_PORT_MODE_RX_PATH_DELAY 0x10 + +#define DEV_GMII_PORT_MODE_TX_PATH_DELAY 0x14 + +#define DEV_GMII_PORT_MODE_PTP_PREDICT_CFG 0x18 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG 0x1c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_ENA_CFG_TX_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG 0x20 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_MODE_CFG_FDX_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_MAXLEN_CFG 0x24 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG 0x28 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_PB_ENA BIT(1) +#define 
DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV_GMII_MAC_CFG_STATUS_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG 0x2c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG 0x30 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG 0x34 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_OB_ENA BIT(25) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_WEXC_DIS BIT(24) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_M GENMASK(23, 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) +#define DEV_GMII_MAC_CFG_STATUS_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG 0x38 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_TBI_MODE BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) + +#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_LOW_CFG 0x3c + +#define DEV_GMII_MAC_CFG_STATUS_MAC_FC_MAC_HIGH_CFG 0x40 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY 0x44 + +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_RX_JUNK_STICKY BIT(5) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_JAM_STICKY BIT(3) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) +#define DEV_GMII_MAC_CFG_STATUS_MAC_STICKY_TX_ABORT_STICKY BIT(0) + +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG 0x48 + +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA BIT(0) +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA BIT(4) +#define DEV_GMII_MM_CONFIG_ENABLE_CONFIG_KEEP_S_AFTER_D BIT(8) + +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG 0x4c + +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS BIT(0) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(x) (((x) << 4) & GENMASK(11, 4)) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M GENMASK(11, 4) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(x) (((x) & GENMASK(11, 4)) >> 4) +#define 
DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS(x) (((x) << 12) & GENMASK(13, 12)) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_M GENMASK(13, 12) +#define DEV_GMII_MM_CONFIG_VERIF_CONFIG_VERIF_TIMER_UNITS_X(x) (((x) & GENMASK(13, 12)) >> 12) + +#define DEV_GMII_MM_STATISTICS_MM_STATUS 0x50 + +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STATUS BIT(0) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_ACTIVE_STICKY BIT(4) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE(x) (((x) << 8) & GENMASK(10, 8)) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_M GENMASK(10, 8) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_PRMPT_VERIFY_STATE_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_RX_PFRM_STICKY BIT(12) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_UNEXP_TX_PFRM_STICKY BIT(16) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_RX_FRAME_STATUS BIT(20) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_FRAME_STATUS BIT(24) +#define DEV_GMII_MM_STATISTICS_MM_STATUS_MM_TX_PRMPT_STATUS BIT(28) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_hsio.h b/drivers/net/ethernet/mscc/ocelot_hsio.h new file mode 100644 index 000000000000..d93ddec3931b --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_hsio.h @@ -0,0 +1,785 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_HSIO_H_ +#define _MSCC_OCELOT_HSIO_H_ + +#define HSIO_PLL5G_CFG0_ENA_ROT BIT(31) +#define HSIO_PLL5G_CFG0_ENA_LANE BIT(30) +#define HSIO_PLL5G_CFG0_ENA_CLKTREE BIT(29) +#define HSIO_PLL5G_CFG0_DIV4 BIT(28) +#define HSIO_PLL5G_CFG0_ENA_LOCK_FINE BIT(27) +#define HSIO_PLL5G_CFG0_SELBGV820(x) (((x) << 23) & GENMASK(26, 23)) +#define HSIO_PLL5G_CFG0_SELBGV820_M GENMASK(26, 23) +#define HSIO_PLL5G_CFG0_SELBGV820_X(x) (((x) & GENMASK(26, 23)) >> 23) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_M GENMASK(22, 18) +#define HSIO_PLL5G_CFG0_LOOP_BW_RES_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_PLL5G_CFG0_SELCPI(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG0_SELCPI_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG0_SELCPI_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG0_ENA_VCO_CONTRH BIT(15) +#define HSIO_PLL5G_CFG0_ENA_CP1 BIT(14) +#define HSIO_PLL5G_CFG0_ENA_VCO_BUF BIT(13) +#define HSIO_PLL5G_CFG0_ENA_BIAS BIT(12) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_M GENMASK(11, 6) +#define HSIO_PLL5G_CFG0_CPU_CLK_DIV_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG0_CORE_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_CFG1_ENA_DIRECT BIT(18) +#define HSIO_PLL5G_CFG1_ROT_SPEED BIT(17) +#define HSIO_PLL5G_CFG1_ROT_DIR BIT(16) +#define HSIO_PLL5G_CFG1_READBACK_DATA_SEL BIT(15) +#define HSIO_PLL5G_CFG1_RC_ENABLE BIT(14) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_PLL5G_CFG1_RC_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_PLL5G_CFG1_QUARTER_RATE BIT(5) +#define HSIO_PLL5G_CFG1_PWD_TX BIT(4) +#define HSIO_PLL5G_CFG1_PWD_RX BIT(3) +#define HSIO_PLL5G_CFG1_OUT_OF_RANGE_RECAL_ENA BIT(2) +#define HSIO_PLL5G_CFG1_HALF_RATE BIT(1) +#define HSIO_PLL5G_CFG1_FORCE_SET_ENA BIT(0) + +#define HSIO_PLL5G_CFG2_ENA_TEST_MODE BIT(30) +#define 
HSIO_PLL5G_CFG2_ENA_PFD_IN_FLIP BIT(29) +#define HSIO_PLL5G_CFG2_ENA_VCO_NREF_TESTOUT BIT(28) +#define HSIO_PLL5G_CFG2_ENA_FBTESTOUT BIT(27) +#define HSIO_PLL5G_CFG2_ENA_RCPLL BIT(26) +#define HSIO_PLL5G_CFG2_ENA_CP2 BIT(25) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS1 BIT(24) +#define HSIO_PLL5G_CFG2_AMPC_SEL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG2_AMPC_SEL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG2_AMPC_SEL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG2_ENA_CLK_BYPASS BIT(15) +#define HSIO_PLL5G_CFG2_PWD_AMPCTRL_N BIT(14) +#define HSIO_PLL5G_CFG2_ENA_AMPCTRL BIT(13) +#define HSIO_PLL5G_CFG2_ENA_AMP_CTRL_FORCE BIT(12) +#define HSIO_PLL5G_CFG2_FRC_FSM_POR BIT(11) +#define HSIO_PLL5G_CFG2_DISABLE_FSM_POR BIT(10) +#define HSIO_PLL5G_CFG2_GAIN_TEST(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_PLL5G_CFG2_GAIN_TEST_M GENMASK(9, 5) +#define HSIO_PLL5G_CFG2_GAIN_TEST_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_PLL5G_CFG2_EN_RESET_OVERRUN BIT(4) +#define HSIO_PLL5G_CFG2_EN_RESET_LIM_DET BIT(3) +#define HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET BIT(2) +#define HSIO_PLL5G_CFG2_DISABLE_FSM BIT(1) +#define HSIO_PLL5G_CFG2_ENA_GAIN_TEST BIT(0) + +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL(x) (((x) << 22) & GENMASK(23, 22)) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_M GENMASK(23, 22) +#define HSIO_PLL5G_CFG3_TEST_ANA_OUT_SEL_X(x) (((x) & GENMASK(23, 22)) >> 22) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL_M GENMASK(21, 19) +#define HSIO_PLL5G_CFG3_TESTOUT_SEL_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_PLL5G_CFG3_ENA_ANA_TEST_OUT BIT(18) +#define HSIO_PLL5G_CFG3_ENA_TEST_OUT BIT(17) +#define HSIO_PLL5G_CFG3_SEL_FBDCLK BIT(16) +#define HSIO_PLL5G_CFG3_SEL_CML_CMOS_PFD BIT(15) +#define HSIO_PLL5G_CFG3_RST_FB_N BIT(14) +#define HSIO_PLL5G_CFG3_FORCE_VCO_CONTRH BIT(13) +#define HSIO_PLL5G_CFG3_FORCE_LO BIT(12) +#define HSIO_PLL5G_CFG3_FORCE_HI BIT(11) +#define HSIO_PLL5G_CFG3_FORCE_ENA BIT(10) +#define HSIO_PLL5G_CFG3_FORCE_CP BIT(9) +#define HSIO_PLL5G_CFG3_FBDIVSEL_TST_ENA BIT(8) +#define HSIO_PLL5G_CFG3_FBDIVSEL(x) ((x) & GENMASK(7, 0)) +#define HSIO_PLL5G_CFG3_FBDIVSEL_M GENMASK(7, 0) + +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG4_IB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG4_IB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG4_IB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL(x) (((x) << 16) & GENMASK(23, 16)) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_M GENMASK(23, 16) +#define HSIO_PLL5G_CFG5_OB_BIAS_CTRL_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define HSIO_PLL5G_CFG5_OB_CTRL(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_CFG5_OB_CTRL_M GENMASK(15, 0) + +#define HSIO_PLL5G_CFG6_REFCLK_SEL_SRC BIT(23) +#define HSIO_PLL5G_CFG6_REFCLK_SEL(x) (((x) << 20) & GENMASK(22, 20)) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_M GENMASK(22, 20) +#define HSIO_PLL5G_CFG6_REFCLK_SEL_X(x) (((x) & GENMASK(22, 20)) >> 20) +#define HSIO_PLL5G_CFG6_REFCLK_SRC BIT(19) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_M GENMASK(17, 16) +#define HSIO_PLL5G_CFG6_POR_DEL_SEL_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_M GENMASK(15, 8) +#define HSIO_PLL5G_CFG6_DIV125REF_SEL_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define 
HSIO_PLL5G_CFG6_ENA_REFCLKC2 BIT(7) +#define HSIO_PLL5G_CFG6_ENA_FBCLKC2 BIT(6) +#define HSIO_PLL5G_CFG6_DDR_CLK_DIV(x) ((x) & GENMASK(5, 0)) +#define HSIO_PLL5G_CFG6_DDR_CLK_DIV_M GENMASK(5, 0) + +#define HSIO_PLL5G_STATUS0_RANGE_LIM BIT(12) +#define HSIO_PLL5G_STATUS0_OUT_OF_RANGE_ERR BIT(11) +#define HSIO_PLL5G_STATUS0_CALIBRATION_ERR BIT(10) +#define HSIO_PLL5G_STATUS0_CALIBRATION_DONE BIT(9) +#define HSIO_PLL5G_STATUS0_READBACK_DATA(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_M GENMASK(8, 1) +#define HSIO_PLL5G_STATUS0_READBACK_DATA_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_PLL5G_STATUS0_LOCK_STATUS BIT(0) + +#define HSIO_PLL5G_STATUS1_SIG_DEL(x) (((x) << 21) & GENMASK(28, 21)) +#define HSIO_PLL5G_STATUS1_SIG_DEL_M GENMASK(28, 21) +#define HSIO_PLL5G_STATUS1_SIG_DEL_X(x) (((x) & GENMASK(28, 21)) >> 21) +#define HSIO_PLL5G_STATUS1_GAIN_STAT(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_M GENMASK(20, 16) +#define HSIO_PLL5G_STATUS1_GAIN_STAT_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF(x) (((x) << 4) & GENMASK(13, 4)) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_M GENMASK(13, 4) +#define HSIO_PLL5G_STATUS1_FBCNT_DIF_X(x) (((x) & GENMASK(13, 4)) >> 4) +#define HSIO_PLL5G_STATUS1_FSM_STAT(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_PLL5G_STATUS1_FSM_STAT_M GENMASK(3, 1) +#define HSIO_PLL5G_STATUS1_FSM_STAT_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_PLL5G_STATUS1_FSM_LOCK BIT(0) + +#define HSIO_PLL5G_BIST_CFG0_PLLB_START_BIST BIT(31) +#define HSIO_PLL5G_BIST_CFG0_PLLB_MEAS_MODE BIT(30) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_M GENMASK(23, 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_REPEAT_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_M GENMASK(19, 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_LOCK_UNCERT_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_CFG0_PLLB_DIV_FACTOR_PRE_M GENMASK(15, 0) + +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_M GENMASK(7, 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FSM_STAT_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_PLL5G_BIST_STAT0_PLLB_BUSY BIT(2) +#define HSIO_PLL5G_BIST_STAT0_PLLB_DONE_N BIT(1) +#define HSIO_PLL5G_BIST_STAT0_PLLB_FAIL BIT(0) + +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_M GENMASK(31, 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_OUT_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF(x) ((x) & GENMASK(15, 0)) +#define HSIO_PLL5G_BIST_STAT1_PLLB_CNT_REF_DIFF_M GENMASK(15, 0) + +#define HSIO_RCOMP_CFG0_PWD_ENA BIT(13) +#define HSIO_RCOMP_CFG0_RUN_CAL BIT(12) +#define HSIO_RCOMP_CFG0_SPEED_SEL(x) (((x) << 10) & GENMASK(11, 10)) +#define HSIO_RCOMP_CFG0_SPEED_SEL_M GENMASK(11, 10) +#define HSIO_RCOMP_CFG0_SPEED_SEL_X(x) (((x) & GENMASK(11, 10)) >> 10) +#define HSIO_RCOMP_CFG0_MODE_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_RCOMP_CFG0_MODE_SEL_M GENMASK(9, 8) +#define HSIO_RCOMP_CFG0_MODE_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_RCOMP_CFG0_FORCE_ENA BIT(4) +#define HSIO_RCOMP_CFG0_RCOMP_VAL(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_CFG0_RCOMP_VAL_M GENMASK(3, 0) 
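The HSIO_RCOMP_CFG0 fields above, together with the HSIO_RCOMP_STATUS bits that follow, expose the resistor-calibration block. A hedged sketch of how a caller could kick one calibration run and read back the 4-bit result, using the ocelot_read()/ocelot_rmw() accessors that ocelot_board.c in this patch already relies on. The register index constants HSIO_RCOMP_CFG0 and HSIO_RCOMP_STATUS are assumed to be defined alongside HSIO_HW_CFG, and the unbounded polling is a simplification; a real caller would add a timeout:

#include "ocelot.h"	/* struct ocelot, ocelot_read(), ocelot_rmw() */

static u32 ocelot_rcomp_sketch(struct ocelot *ocelot)
{
	u32 val;

	/* Request a single calibration run (register index assumed) */
	ocelot_rmw(ocelot, HSIO_RCOMP_CFG0_RUN_CAL, HSIO_RCOMP_CFG0_RUN_CAL,
		   HSIO_RCOMP_CFG0);

	/* Wait for the calibration state machine to go idle */
	do {
		val = ocelot_read(ocelot, HSIO_RCOMP_STATUS);
	} while (val & HSIO_RCOMP_STATUS_BUSY);

	/* The calibration result sits in the low four bits of the status */
	return val & HSIO_RCOMP_STATUS_RCOMP_M;
}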
+ +#define HSIO_RCOMP_STATUS_BUSY BIT(12) +#define HSIO_RCOMP_STATUS_DELTA_ALERT BIT(7) +#define HSIO_RCOMP_STATUS_RCOMP(x) ((x) & GENMASK(3, 0)) +#define HSIO_RCOMP_STATUS_RCOMP_M GENMASK(3, 0) + +#define HSIO_SYNC_ETH_CFG_RSZ 0x4 + +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_M GENMASK(7, 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_SRC_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_M GENMASK(3, 1) +#define HSIO_SYNC_ETH_CFG_SEL_RECO_CLK_DIV_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_SYNC_ETH_CFG_RECO_CLK_ENA BIT(0) + +#define HSIO_SYNC_ETH_PLL_CFG_PLL_AUTO_SQUELCH_ENA BIT(0) + +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_M GENMASK(12, 11) +#define HSIO_S1G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 8) & GENMASK(10, 8)) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_M GENMASK(10, 8) +#define HSIO_S1G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(10, 8)) >> 8) +#define HSIO_S1G_DES_CFG_DES_BW_ANA(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S1G_DES_CFG_DES_BW_ANA_M GENMASK(7, 5) +#define HSIO_S1G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S1G_DES_CFG_DES_SWAP_ANA BIT(4) +#define HSIO_S1G_DES_CFG_DES_BW_HYST(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_M GENMASK(3, 1) +#define HSIO_S1G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S1G_DES_CFG_DES_SWAP_HYST BIT(0) + +#define HSIO_S1G_IB_CFG_IB_FX100_ENA BIT(27) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST(x) (((x) << 24) & GENMASK(26, 24)) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_M GENMASK(26, 24) +#define HSIO_S1G_IB_CFG_ACJTAG_HYST_X(x) (((x) & GENMASK(26, 24)) >> 24) +#define HSIO_S1G_IB_CFG_IB_DET_LEV(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_M GENMASK(21, 19) +#define HSIO_S1G_IB_CFG_IB_DET_LEV_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S1G_IB_CFG_IB_HYST_LEV BIT(14) +#define HSIO_S1G_IB_CFG_IB_ENA_CMV_TERM BIT(13) +#define HSIO_S1G_IB_CFG_IB_ENA_DC_COUPLING BIT(12) +#define HSIO_S1G_IB_CFG_IB_ENA_DETLEV BIT(11) +#define HSIO_S1G_IB_CFG_IB_ENA_HYST BIT(10) +#define HSIO_S1G_IB_CFG_IB_ENA_OFFSET_COMP BIT(9) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_M GENMASK(8, 6) +#define HSIO_S1G_IB_CFG_IB_EQ_GAIN_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_M GENMASK(5, 4) +#define HSIO_S1G_IB_CFG_IB_SEL_CORNER_FREQ_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_IB_CFG_IB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_OB_CFG_OB_SLP(x) (((x) << 17) & GENMASK(18, 17)) +#define HSIO_S1G_OB_CFG_OB_SLP_M GENMASK(18, 17) +#define HSIO_S1G_OB_CFG_OB_SLP_X(x) (((x) & GENMASK(18, 17)) >> 17) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_M GENMASK(16, 13) +#define HSIO_S1G_OB_CFG_OB_AMP_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define 
HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_M GENMASK(12, 10) +#define HSIO_S1G_OB_CFG_OB_CMM_BIAS_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S1G_OB_CFG_OB_DIS_VCM_CTRL BIT(9) +#define HSIO_S1G_OB_CFG_OB_EN_MEAS_VREG BIT(8) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_M GENMASK(7, 4) +#define HSIO_S1G_OB_CFG_OB_VCM_CTRL_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S1G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 0) + +#define HSIO_S1G_SER_CFG_SER_IDLE BIT(9) +#define HSIO_S1G_SER_CFG_SER_DEEMPH BIT(8) +#define HSIO_S1G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S1G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S1G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S1G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S1G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S1G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S1G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S1G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S1G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S1G_COMMON_CFG_SYS_RST BIT(31) +#define HSIO_S1G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(21) +#define HSIO_S1G_COMMON_CFG_ENA_LANE BIT(18) +#define HSIO_S1G_COMMON_CFG_PWD_RX BIT(17) +#define HSIO_S1G_COMMON_CFG_PWD_TX BIT(16) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL(x) (((x) << 13) & GENMASK(15, 13)) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL_M GENMASK(15, 13) +#define HSIO_S1G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(15, 13)) >> 13) +#define HSIO_S1G_COMMON_CFG_ENA_DIRECT BIT(12) +#define HSIO_S1G_COMMON_CFG_ENA_ELOOP BIT(11) +#define HSIO_S1G_COMMON_CFG_ENA_FLOOP BIT(10) +#define HSIO_S1G_COMMON_CFG_ENA_ILOOP BIT(9) +#define HSIO_S1G_COMMON_CFG_ENA_PLOOP BIT(8) +#define HSIO_S1G_COMMON_CFG_HRATE BIT(7) +#define HSIO_S1G_COMMON_CFG_IF_MODE BIT(0) + +#define HSIO_S1G_PLL_CFG_PLL_ENA_FB_DIV2 BIT(22) +#define HSIO_S1G_PLL_CFG_PLL_ENA_RC_DIV2 BIT(21) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(15, 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_PLL_CFG_PLL_FSM_ENA BIT(7) +#define HSIO_S1G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(6) +#define HSIO_S1G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(5) +#define HSIO_S1G_PLL_CFG_PLL_RB_DATA_SEL BIT(3) + +#define HSIO_S1G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(12) +#define HSIO_S1G_PLL_STATUS_PLL_CAL_ERR BIT(11) +#define HSIO_S1G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(10) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S1G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S1G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S1G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S1G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S1G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S1G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S1G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S1G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S1G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) 
+#define HSIO_S1G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S1G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S1G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S1G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S1G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S1G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(17, 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S1G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S1G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S1G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S1G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S1G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define HSIO_S1G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S1G_MISC_CFG_LANE_RST BIT(0) + +#define HSIO_S1G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S1G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S1G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S1G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S1G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S1G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S1G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR(x) ((x) & GENMASK(8, 0)) +#define HSIO_MCB_S1G_ADDR_CFG_SERDES1G_ADDR_M GENMASK(8, 0) + +#define HSIO_S6G_DIG_CFG_GP(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DIG_CFG_GP_M GENMASK(18, 16) +#define HSIO_S6G_DIG_CFG_GP_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DIG_CFG_TX_BIT_DOUBLING_MODE_ENA BIT(7) +#define HSIO_S6G_DIG_CFG_SIGDET_TESTMODE BIT(6) +#define HSIO_S6G_DIG_CFG_SIGDET_AST(x) (((x) << 3) & GENMASK(5, 3)) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_M GENMASK(5, 3) +#define HSIO_S6G_DIG_CFG_SIGDET_AST_X(x) (((x) & GENMASK(5, 3)) >> 3) +#define 
HSIO_S6G_DIG_CFG_SIGDET_DST(x) ((x) & GENMASK(2, 0)) +#define HSIO_S6G_DIG_CFG_SIGDET_DST_M GENMASK(2, 0) + +#define HSIO_S6G_DFT_CFG0_LAZYBIT BIT(31) +#define HSIO_S6G_DFT_CFG0_INV_DIS BIT(23) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL(x) (((x) << 20) & GENMASK(21, 20)) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_M GENMASK(21, 20) +#define HSIO_S6G_DFT_CFG0_PRBS_SEL_X(x) (((x) & GENMASK(21, 20)) >> 20) +#define HSIO_S6G_DFT_CFG0_TEST_MODE(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_M GENMASK(18, 16) +#define HSIO_S6G_DFT_CFG0_TEST_MODE_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_DFT_CFG0_RX_PHS_CORR_DIS BIT(4) +#define HSIO_S6G_DFT_CFG0_RX_PDSENS_ENA BIT(3) +#define HSIO_S6G_DFT_CFG0_RX_DFT_ENA BIT(2) +#define HSIO_S6G_DFT_CFG0_TX_DFT_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S6G_DFT_CFG1_TX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG1_TX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG1_TX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG1_TX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG1_TX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL(x) (((x) << 8) & GENMASK(17, 8)) +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_M GENMASK(17, 8) +#define HSIO_S6G_DFT_CFG2_RX_JITTER_AMPL_X(x) (((x) & GENMASK(17, 8)) >> 8) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_M GENMASK(7, 4) +#define HSIO_S6G_DFT_CFG2_RX_STEP_FREQ_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_DFT_CFG2_RX_JI_ENA BIT(3) +#define HSIO_S6G_DFT_CFG2_RX_WAVEFORM_SEL BIT(2) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_DIR BIT(1) +#define HSIO_S6G_DFT_CFG2_RX_FREQOFF_ENA BIT(0) + +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_ENA BIT(20) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH(x) (((x) << 16) & GENMASK(19, 16)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_M GENMASK(19, 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_FBS_HIGH_X(x) (((x) & GENMASK(19, 16)) >> 16) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_M GENMASK(15, 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_HIGH_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_RC_PLL_BIST_CFG_PLL_BIST_LOW_M GENMASK(7, 0) + +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_M GENMASK(14, 13) +#define HSIO_S6G_MISC_CFG_SEL_RECO_CLK_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_M GENMASK(12, 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_KICK_MODE_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_SWAP BIT(10) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_MODE BIT(9) +#define HSIO_S6G_MISC_CFG_DES_100FX_CPMD_ENA BIT(8) +#define HSIO_S6G_MISC_CFG_RX_BUS_FLIP_ENA BIT(7) +#define HSIO_S6G_MISC_CFG_TX_BUS_FLIP_ENA BIT(6) +#define HSIO_S6G_MISC_CFG_RX_LPI_MODE_ENA BIT(5) +#define HSIO_S6G_MISC_CFG_TX_LPI_MODE_ENA BIT(4) +#define HSIO_S6G_MISC_CFG_RX_DATA_INV_ENA BIT(3) +#define 
HSIO_S6G_MISC_CFG_TX_DATA_INV_ENA BIT(2) +#define HSIO_S6G_MISC_CFG_LANE_RST BIT(0) + +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_M GENMASK(22, 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_POST1_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC(x) (((x) << 13) & GENMASK(17, 13)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_M GENMASK(17, 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_PREC_X(x) (((x) & GENMASK(17, 13)) >> 13) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_ANEG_CFG_AN_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_DFT_STATUS_PRBS_SYNC_STAT BIT(8) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_NOT_DONE BIT(7) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_FAILED BIT(6) +#define HSIO_S6G_DFT_STATUS_PLL_BIST_TIMEOUT_ERR BIT(5) +#define HSIO_S6G_DFT_STATUS_BIST_ACTIVE BIT(3) +#define HSIO_S6G_DFT_STATUS_BIST_NOSYNC BIT(2) +#define HSIO_S6G_DFT_STATUS_BIST_COMPLETE_N BIT(1) +#define HSIO_S6G_DFT_STATUS_BIST_ERROR BIT(0) + +#define HSIO_S6G_MISC_STATUS_DES_100FX_PHASE_SEL BIT(0) + +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL(x) (((x) << 13) & GENMASK(16, 13)) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_M GENMASK(16, 13) +#define HSIO_S6G_DES_CFG_DES_PHS_CTRL_X(x) (((x) & GENMASK(16, 13)) >> 13) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL(x) (((x) << 10) & GENMASK(12, 10)) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_M GENMASK(12, 10) +#define HSIO_S6G_DES_CFG_DES_MBTR_CTRL_X(x) (((x) & GENMASK(12, 10)) >> 10) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL(x) (((x) << 8) & GENMASK(9, 8)) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_M GENMASK(9, 8) +#define HSIO_S6G_DES_CFG_DES_CPMD_SEL_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define HSIO_S6G_DES_CFG_DES_BW_HYST(x) (((x) << 5) & GENMASK(7, 5)) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_M GENMASK(7, 5) +#define HSIO_S6G_DES_CFG_DES_BW_HYST_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define HSIO_S6G_DES_CFG_DES_SWAP_HYST BIT(4) +#define HSIO_S6G_DES_CFG_DES_BW_ANA(x) (((x) << 1) & GENMASK(3, 1)) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_M GENMASK(3, 1) +#define HSIO_S6G_DES_CFG_DES_BW_ANA_X(x) (((x) & GENMASK(3, 1)) >> 1) +#define HSIO_S6G_DES_CFG_DES_SWAP_ANA BIT(0) + +#define HSIO_S6G_IB_CFG_IB_SOFSI(x) (((x) << 29) & GENMASK(30, 29)) +#define HSIO_S6G_IB_CFG_IB_SOFSI_M GENMASK(30, 29) +#define HSIO_S6G_IB_CFG_IB_SOFSI_X(x) (((x) & GENMASK(30, 29)) >> 29) +#define HSIO_S6G_IB_CFG_IB_VBULK_SEL BIT(28) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ(x) (((x) << 24) & GENMASK(27, 24)) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_M GENMASK(27, 24) +#define HSIO_S6G_IB_CFG_IB_RTRM_ADJ_X(x) (((x) & GENMASK(27, 24)) >> 24) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ(x) (((x) << 20) & GENMASK(23, 20)) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_M GENMASK(23, 20) +#define HSIO_S6G_IB_CFG_IB_ICML_ADJ_X(x) (((x) & GENMASK(23, 20)) >> 20) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL(x) (((x) << 18) & GENMASK(19, 18)) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_M GENMASK(19, 18) +#define HSIO_S6G_IB_CFG_IB_TERM_MODE_SEL_X(x) (((x) & GENMASK(19, 18)) >> 18) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL(x) (((x) << 15) 
& GENMASK(17, 15)) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_M GENMASK(17, 15) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_CLK_SEL_X(x) (((x) & GENMASK(17, 15)) >> 15) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP(x) (((x) << 13) & GENMASK(14, 13)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_M GENMASK(14, 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_HP_X(x) (((x) & GENMASK(14, 13)) >> 13) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID(x) (((x) << 11) & GENMASK(12, 11)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_M GENMASK(12, 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_MID_X(x) (((x) & GENMASK(12, 11)) >> 11) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP(x) (((x) << 9) & GENMASK(10, 9)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_M GENMASK(10, 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_LP_X(x) (((x) & GENMASK(10, 9)) >> 9) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET(x) (((x) << 7) & GENMASK(8, 7)) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_M GENMASK(8, 7) +#define HSIO_S6G_IB_CFG_IB_REG_PAT_SEL_OFFSET_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define HSIO_S6G_IB_CFG_IB_ANA_TEST_ENA BIT(6) +#define HSIO_S6G_IB_CFG_IB_SIG_DET_ENA BIT(5) +#define HSIO_S6G_IB_CFG_IB_CONCUR BIT(4) +#define HSIO_S6G_IB_CFG_IB_CAL_ENA BIT(3) +#define HSIO_S6G_IB_CFG_IB_SAM_ENA BIT(2) +#define HSIO_S6G_IB_CFG_IB_EQZ_ENA BIT(1) +#define HSIO_S6G_IB_CFG_IB_REG_ENA BIT(0) + +#define HSIO_S6G_IB_CFG1_IB_TJTAG(x) (((x) << 17) & GENMASK(21, 17)) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_M GENMASK(21, 17) +#define HSIO_S6G_IB_CFG1_IB_TJTAG_X(x) (((x) & GENMASK(21, 17)) >> 17) +#define HSIO_S6G_IB_CFG1_IB_TSDET(x) (((x) << 12) & GENMASK(16, 12)) +#define HSIO_S6G_IB_CFG1_IB_TSDET_M GENMASK(16, 12) +#define HSIO_S6G_IB_CFG1_IB_TSDET_X(x) (((x) & GENMASK(16, 12)) >> 12) +#define HSIO_S6G_IB_CFG1_IB_SCALY(x) (((x) << 8) & GENMASK(11, 8)) +#define HSIO_S6G_IB_CFG1_IB_SCALY_M GENMASK(11, 8) +#define HSIO_S6G_IB_CFG1_IB_SCALY_X(x) (((x) & GENMASK(11, 8)) >> 8) +#define HSIO_S6G_IB_CFG1_IB_FILT_HP BIT(7) +#define HSIO_S6G_IB_CFG1_IB_FILT_MID BIT(6) +#define HSIO_S6G_IB_CFG1_IB_FILT_LP BIT(5) +#define HSIO_S6G_IB_CFG1_IB_FILT_OFFSET BIT(4) +#define HSIO_S6G_IB_CFG1_IB_FRC_HP BIT(3) +#define HSIO_S6G_IB_CFG1_IB_FRC_MID BIT(2) +#define HSIO_S6G_IB_CFG1_IB_FRC_LP BIT(1) +#define HSIO_S6G_IB_CFG1_IB_FRC_OFFSET BIT(0) + +#define HSIO_S6G_IB_CFG2_IB_TINFV(x) (((x) << 27) & GENMASK(29, 27)) +#define HSIO_S6G_IB_CFG2_IB_TINFV_M GENMASK(29, 27) +#define HSIO_S6G_IB_CFG2_IB_TINFV_X(x) (((x) & GENMASK(29, 27)) >> 27) +#define HSIO_S6G_IB_CFG2_IB_OINFI(x) (((x) << 22) & GENMASK(26, 22)) +#define HSIO_S6G_IB_CFG2_IB_OINFI_M GENMASK(26, 22) +#define HSIO_S6G_IB_CFG2_IB_OINFI_X(x) (((x) & GENMASK(26, 22)) >> 22) +#define HSIO_S6G_IB_CFG2_IB_TAUX(x) (((x) << 19) & GENMASK(21, 19)) +#define HSIO_S6G_IB_CFG2_IB_TAUX_M GENMASK(21, 19) +#define HSIO_S6G_IB_CFG2_IB_TAUX_X(x) (((x) & GENMASK(21, 19)) >> 19) +#define HSIO_S6G_IB_CFG2_IB_OINFS(x) (((x) << 16) & GENMASK(18, 16)) +#define HSIO_S6G_IB_CFG2_IB_OINFS_M GENMASK(18, 16) +#define HSIO_S6G_IB_CFG2_IB_OINFS_X(x) (((x) & GENMASK(18, 16)) >> 16) +#define HSIO_S6G_IB_CFG2_IB_OCALS(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_IB_CFG2_IB_OCALS_M GENMASK(15, 10) +#define HSIO_S6G_IB_CFG2_IB_OCALS_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_IB_CFG2_IB_TCALV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_IB_CFG2_IB_TCALV_M GENMASK(9, 5) +#define HSIO_S6G_IB_CFG2_IB_TCALV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_IB_CFG2_IB_UMAX(x) (((x) << 3) & GENMASK(4, 3)) +#define HSIO_S6G_IB_CFG2_IB_UMAX_M 
GENMASK(4, 3) +#define HSIO_S6G_IB_CFG2_IB_UMAX_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define HSIO_S6G_IB_CFG2_IB_UREG(x) ((x) & GENMASK(2, 0)) +#define HSIO_S6G_IB_CFG2_IB_UREG_M GENMASK(2, 0) + +#define HSIO_S6G_IB_CFG3_IB_INI_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG3_IB_INI_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG3_IB_INI_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG3_IB_INI_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG3_IB_INI_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG3_IB_INI_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG3_IB_INI_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG4_IB_MAX_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG4_IB_MAX_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_IB_CFG5_IB_MIN_HP(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_M GENMASK(23, 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_HP_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_M GENMASK(17, 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_MID_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_M GENMASK(11, 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_LP_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_CFG5_IB_MIN_OFFSET_M GENMASK(5, 0) + +#define HSIO_S6G_OB_CFG_OB_IDLE BIT(31) +#define HSIO_S6G_OB_CFG_OB_ENA1V_MODE BIT(30) +#define HSIO_S6G_OB_CFG_OB_POL BIT(29) +#define HSIO_S6G_OB_CFG_OB_POST0(x) (((x) << 23) & GENMASK(28, 23)) +#define HSIO_S6G_OB_CFG_OB_POST0_M GENMASK(28, 23) +#define HSIO_S6G_OB_CFG_OB_POST0_X(x) (((x) & GENMASK(28, 23)) >> 23) +#define HSIO_S6G_OB_CFG_OB_PREC(x) (((x) << 18) & GENMASK(22, 18)) +#define HSIO_S6G_OB_CFG_OB_PREC_M GENMASK(22, 18) +#define HSIO_S6G_OB_CFG_OB_PREC_X(x) (((x) & GENMASK(22, 18)) >> 18) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_MUX BIT(17) +#define HSIO_S6G_OB_CFG_OB_R_ADJ_PDR BIT(16) +#define HSIO_S6G_OB_CFG_OB_POST1(x) (((x) << 11) & GENMASK(15, 11)) +#define HSIO_S6G_OB_CFG_OB_POST1_M GENMASK(15, 11) +#define HSIO_S6G_OB_CFG_OB_POST1_X(x) (((x) & GENMASK(15, 11)) >> 11) +#define HSIO_S6G_OB_CFG_OB_R_COR BIT(10) +#define HSIO_S6G_OB_CFG_OB_SEL_RCTRL BIT(9) +#define HSIO_S6G_OB_CFG_OB_SR_H BIT(8) +#define HSIO_S6G_OB_CFG_OB_SR(x) (((x) << 4) & GENMASK(7, 4)) +#define HSIO_S6G_OB_CFG_OB_SR_M GENMASK(7, 4) +#define HSIO_S6G_OB_CFG_OB_SR_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL(x) ((x) & GENMASK(3, 0)) +#define HSIO_S6G_OB_CFG_OB_RESISTOR_CTRL_M GENMASK(3, 
0) + +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS(x) (((x) << 6) & GENMASK(8, 6)) +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_M GENMASK(8, 6) +#define HSIO_S6G_OB_CFG1_OB_ENA_CAS_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define HSIO_S6G_OB_CFG1_OB_LEV(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_OB_CFG1_OB_LEV_M GENMASK(5, 0) + +#define HSIO_S6G_SER_CFG_SER_4TAP_ENA BIT(8) +#define HSIO_S6G_SER_CFG_SER_CPMD_SEL BIT(7) +#define HSIO_S6G_SER_CFG_SER_SWAP_CPMD BIT(6) +#define HSIO_S6G_SER_CFG_SER_ALISEL(x) (((x) << 4) & GENMASK(5, 4)) +#define HSIO_S6G_SER_CFG_SER_ALISEL_M GENMASK(5, 4) +#define HSIO_S6G_SER_CFG_SER_ALISEL_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define HSIO_S6G_SER_CFG_SER_ENHYS BIT(3) +#define HSIO_S6G_SER_CFG_SER_BIG_WIN BIT(2) +#define HSIO_S6G_SER_CFG_SER_EN_WIN BIT(1) +#define HSIO_S6G_SER_CFG_SER_ENALI BIT(0) + +#define HSIO_S6G_COMMON_CFG_SYS_RST BIT(17) +#define HSIO_S6G_COMMON_CFG_SE_DIV2_ENA BIT(16) +#define HSIO_S6G_COMMON_CFG_SE_AUTO_SQUELCH_ENA BIT(15) +#define HSIO_S6G_COMMON_CFG_ENA_LANE BIT(14) +#define HSIO_S6G_COMMON_CFG_PWD_RX BIT(13) +#define HSIO_S6G_COMMON_CFG_PWD_TX BIT(12) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL(x) (((x) << 9) & GENMASK(11, 9)) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_M GENMASK(11, 9) +#define HSIO_S6G_COMMON_CFG_LANE_CTRL_X(x) (((x) & GENMASK(11, 9)) >> 9) +#define HSIO_S6G_COMMON_CFG_ENA_DIRECT BIT(8) +#define HSIO_S6G_COMMON_CFG_ENA_ELOOP BIT(7) +#define HSIO_S6G_COMMON_CFG_ENA_FLOOP BIT(6) +#define HSIO_S6G_COMMON_CFG_ENA_ILOOP BIT(5) +#define HSIO_S6G_COMMON_CFG_ENA_PLOOP BIT(4) +#define HSIO_S6G_COMMON_CFG_HRATE BIT(3) +#define HSIO_S6G_COMMON_CFG_QRATE BIT(2) +#define HSIO_S6G_COMMON_CFG_IF_MODE(x) ((x) & GENMASK(1, 0)) +#define HSIO_S6G_COMMON_CFG_IF_MODE_M GENMASK(1, 0) + +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS(x) (((x) << 16) & GENMASK(17, 16)) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_M GENMASK(17, 16) +#define HSIO_S6G_PLL_CFG_PLL_ENA_OFFS_X(x) (((x) & GENMASK(17, 16)) >> 16) +#define HSIO_S6G_PLL_CFG_PLL_DIV4 BIT(15) +#define HSIO_S6G_PLL_CFG_PLL_ENA_ROT BIT(14) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA(x) (((x) << 6) & GENMASK(13, 6)) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_M GENMASK(13, 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_CTRL_DATA_X(x) (((x) & GENMASK(13, 6)) >> 6) +#define HSIO_S6G_PLL_CFG_PLL_FSM_ENA BIT(5) +#define HSIO_S6G_PLL_CFG_PLL_FSM_FORCE_SET_ENA BIT(4) +#define HSIO_S6G_PLL_CFG_PLL_FSM_OOR_RECAL_ENA BIT(3) +#define HSIO_S6G_PLL_CFG_PLL_RB_DATA_SEL BIT(2) +#define HSIO_S6G_PLL_CFG_PLL_ROT_DIR BIT(1) +#define HSIO_S6G_PLL_CFG_PLL_ROT_FRQ BIT(0) + +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_N BIT(5) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_DATA_P BIT(4) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_INIT_CLK BIT(3) +#define HSIO_S6G_ACJTAG_CFG_OB_DIRECT BIT(2) +#define HSIO_S6G_ACJTAG_CFG_ACJTAG_ENA BIT(1) +#define HSIO_S6G_ACJTAG_CFG_JTAG_CTRL_ENA BIT(0) + +#define HSIO_S6G_GP_CFG_GP_MSB(x) (((x) << 16) & GENMASK(31, 16)) +#define HSIO_S6G_GP_CFG_GP_MSB_M GENMASK(31, 16) +#define HSIO_S6G_GP_CFG_GP_MSB_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define HSIO_S6G_GP_CFG_GP_LSB(x) ((x) & GENMASK(15, 0)) +#define HSIO_S6G_GP_CFG_GP_LSB_M GENMASK(15, 0) + +#define HSIO_S6G_IB_STATUS0_IB_CAL_DONE BIT(8) +#define HSIO_S6G_IB_STATUS0_IB_HP_GAIN_ACT BIT(7) +#define HSIO_S6G_IB_STATUS0_IB_MID_GAIN_ACT BIT(6) +#define HSIO_S6G_IB_STATUS0_IB_LP_GAIN_ACT BIT(5) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ACT BIT(4) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_VLD BIT(3) +#define HSIO_S6G_IB_STATUS0_IB_OFFSET_ERR BIT(2) +#define HSIO_S6G_IB_STATUS0_IB_OFFSDIR BIT(1) +#define 
HSIO_S6G_IB_STATUS0_IB_SIG_DET BIT(0) + +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT(x) (((x) << 18) & GENMASK(23, 18)) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_M GENMASK(23, 18) +#define HSIO_S6G_IB_STATUS1_IB_HP_GAIN_STAT_X(x) (((x) & GENMASK(23, 18)) >> 18) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT(x) (((x) << 12) & GENMASK(17, 12)) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_M GENMASK(17, 12) +#define HSIO_S6G_IB_STATUS1_IB_MID_GAIN_STAT_X(x) (((x) & GENMASK(17, 12)) >> 12) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT(x) (((x) << 6) & GENMASK(11, 6)) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_M GENMASK(11, 6) +#define HSIO_S6G_IB_STATUS1_IB_LP_GAIN_STAT_X(x) (((x) & GENMASK(11, 6)) >> 6) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT(x) ((x) & GENMASK(5, 0)) +#define HSIO_S6G_IB_STATUS1_IB_OFFSET_STAT_M GENMASK(5, 0) + +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_N BIT(2) +#define HSIO_S6G_ACJTAG_STATUS_ACJTAG_CAPT_DATA_P BIT(1) +#define HSIO_S6G_ACJTAG_STATUS_IB_DIRECT BIT(0) + +#define HSIO_S6G_PLL_STATUS_PLL_CAL_NOT_DONE BIT(10) +#define HSIO_S6G_PLL_STATUS_PLL_CAL_ERR BIT(9) +#define HSIO_S6G_PLL_STATUS_PLL_OUT_OF_RANGE_ERR BIT(8) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA(x) ((x) & GENMASK(7, 0)) +#define HSIO_S6G_PLL_STATUS_PLL_RB_DATA_M GENMASK(7, 0) + +#define HSIO_S6G_REVID_SERDES_REV(x) (((x) << 26) & GENMASK(31, 26)) +#define HSIO_S6G_REVID_SERDES_REV_M GENMASK(31, 26) +#define HSIO_S6G_REVID_SERDES_REV_X(x) (((x) & GENMASK(31, 26)) >> 26) +#define HSIO_S6G_REVID_RCPLL_REV(x) (((x) << 21) & GENMASK(25, 21)) +#define HSIO_S6G_REVID_RCPLL_REV_M GENMASK(25, 21) +#define HSIO_S6G_REVID_RCPLL_REV_X(x) (((x) & GENMASK(25, 21)) >> 21) +#define HSIO_S6G_REVID_SER_REV(x) (((x) << 16) & GENMASK(20, 16)) +#define HSIO_S6G_REVID_SER_REV_M GENMASK(20, 16) +#define HSIO_S6G_REVID_SER_REV_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define HSIO_S6G_REVID_DES_REV(x) (((x) << 10) & GENMASK(15, 10)) +#define HSIO_S6G_REVID_DES_REV_M GENMASK(15, 10) +#define HSIO_S6G_REVID_DES_REV_X(x) (((x) & GENMASK(15, 10)) >> 10) +#define HSIO_S6G_REVID_OB_REV(x) (((x) << 5) & GENMASK(9, 5)) +#define HSIO_S6G_REVID_OB_REV_M GENMASK(9, 5) +#define HSIO_S6G_REVID_OB_REV_X(x) (((x) & GENMASK(9, 5)) >> 5) +#define HSIO_S6G_REVID_IB_REV(x) ((x) & GENMASK(4, 0)) +#define HSIO_S6G_REVID_IB_REV_M GENMASK(4, 0) + +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_WR_ONE_SHOT BIT(31) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_RD_ONE_SHOT BIT(30) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR(x) ((x) & GENMASK(24, 0)) +#define HSIO_MCB_S6G_ADDR_CFG_SERDES6G_ADDR_M GENMASK(24, 0) + +#define HSIO_HW_CFG_DEV2G5_10_MODE BIT(6) +#define HSIO_HW_CFG_DEV1G_9_MODE BIT(5) +#define HSIO_HW_CFG_DEV1G_6_MODE BIT(4) +#define HSIO_HW_CFG_DEV1G_5_MODE BIT(3) +#define HSIO_HW_CFG_DEV1G_4_MODE BIT(2) +#define HSIO_HW_CFG_PCIE_ENA BIT(1) +#define HSIO_HW_CFG_QSGMII_ENA BIT(0) + +#define HSIO_HW_QSGMII_CFG_SHYST_DIS BIT(3) +#define HSIO_HW_QSGMII_CFG_E_DET_ENA BIT(2) +#define HSIO_HW_QSGMII_CFG_USE_I1_ENA BIT(1) +#define HSIO_HW_QSGMII_CFG_FLIP_LANES BIT(0) + +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS(x) (((x) << 1) & GENMASK(6, 1)) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_M GENMASK(6, 1) +#define HSIO_HW_QSGMII_STAT_DELAY_VAR_X200PS_X(x) (((x) & GENMASK(6, 1)) >> 1) +#define HSIO_HW_QSGMII_STAT_SYNC BIT(0) + +#define HSIO_CLK_CFG_CLKDIV_PHY(x) (((x) << 1) & GENMASK(8, 1)) +#define HSIO_CLK_CFG_CLKDIV_PHY_M GENMASK(8, 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define HSIO_CLK_CFG_CLKDIV_PHY_DIS BIT(0) + 
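Every multi-bit field in these headers follows the same three-macro convention: FIELD(x) shifts a value into position and masks it, FIELD_M is the in-register mask, and FIELD_X(x) extracts the field back out of a register word. Below is a minimal userspace sketch of a read-modify-write using one of the macros above; BIT()/GENMASK() are re-derived locally (assuming a 64-bit unsigned long) only so the snippet compiles outside the kernel.

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

/* Copied verbatim from the header above */
#define HSIO_CLK_CFG_CLKDIV_PHY(x)	(((x) << 1) & GENMASK(8, 1))
#define HSIO_CLK_CFG_CLKDIV_PHY_M	GENMASK(8, 1)
#define HSIO_CLK_CFG_CLKDIV_PHY_X(x)	(((x) & GENMASK(8, 1)) >> 1)

int main(void)
{
	uint32_t reg = 0xffffffff;	/* pretend register readback */

	/* Read-modify-write: clear the field with _M, pack with (x) */
	reg &= ~HSIO_CLK_CFG_CLKDIV_PHY_M;
	reg |= HSIO_CLK_CFG_CLKDIV_PHY(0x2a);

	/* Extract with _X: prints 0x2a */
	printf("CLKDIV_PHY = 0x%x\n",
	       (unsigned int)HSIO_CLK_CFG_CLKDIV_PHY_X(reg));
	return 0;
}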
+#define HSIO_TEMP_SENSOR_CTRL_FORCE_TEMP_RD BIT(5) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_RUN BIT(4) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_NO_RST BIT(3) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_POWER_UP BIT(2) +#define HSIO_TEMP_SENSOR_CTRL_FORCE_CLK BIT(1) +#define HSIO_TEMP_SENSOR_CTRL_SAMPLE_ENA BIT(0) + +#define HSIO_TEMP_SENSOR_CFG_RUN_WID(x) (((x) << 8) & GENMASK(15, 8)) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_M GENMASK(15, 8) +#define HSIO_TEMP_SENSOR_CFG_RUN_WID_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_CFG_SAMPLE_PER_M GENMASK(7, 0) + +#define HSIO_TEMP_SENSOR_STAT_TEMP_VALID BIT(8) +#define HSIO_TEMP_SENSOR_STAT_TEMP(x) ((x) & GENMASK(7, 0)) +#define HSIO_TEMP_SENSOR_STAT_TEMP_M GENMASK(7, 0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c new file mode 100644 index 000000000000..c6db8ad31fdf --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> + +#include "ocelot.h" + +u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + u32 val; + + WARN_ON(!target); + + regmap_read(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, &val); + return val; +} +EXPORT_SYMBOL(__ocelot_read_ix); + +void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_write(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, val); +} +EXPORT_SYMBOL(__ocelot_write_ix); + +void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, + u32 offset) +{ + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_update_bits(ocelot->targets[target], + ocelot->map[target][reg & REG_MASK] + offset, + mask, val); +} +EXPORT_SYMBOL(__ocelot_rmw_ix); + +u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) +{ + return readl(port->regs + reg); +} +EXPORT_SYMBOL(ocelot_port_readl); + +void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) +{ + writel(val, port->regs + reg); +} +EXPORT_SYMBOL(ocelot_port_writel); + +int ocelot_regfields_init(struct ocelot *ocelot, + const struct reg_field *const regfields) +{ + unsigned int i; + u16 target; + + for (i = 0; i < REGFIELD_MAX; i++) { + struct reg_field regfield = {}; + u32 reg = regfields[i].reg; + + if (!reg) + continue; + + target = regfields[i].reg >> TARGET_OFFSET; + + regfield.reg = ocelot->map[target][reg & REG_MASK]; + regfield.lsb = regfields[i].lsb; + regfield.msb = regfields[i].msb; + + ocelot->regfields[i] = + devm_regmap_field_alloc(ocelot->dev, + ocelot->targets[target], + regfield); + + if (IS_ERR(ocelot->regfields[i])) + return PTR_ERR(ocelot->regfields[i]); + } + + return 0; +} +EXPORT_SYMBOL(ocelot_regfields_init); + +static struct regmap_config ocelot_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +struct regmap *ocelot_io_platform_init(struct ocelot *ocelot, + struct platform_device *pdev, + const char *name) +{ + struct resource *res; + void __iomem *regs; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + regs = devm_ioremap_resource(ocelot->dev, res); + if (IS_ERR(regs)) + return ERR_CAST(regs); + + 
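+	/* Naming each target's regmap after its MMIO resource keeps the
+	 * regions distinguishable, e.g. in regmap debugfs.
+	 */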
ocelot_regmap_config.name = name; + return devm_regmap_init_mmio(ocelot->dev, regs, + &ocelot_regmap_config); +} +EXPORT_SYMBOL(ocelot_io_platform_init); diff --git a/drivers/net/ethernet/mscc/ocelot_qs.h b/drivers/net/ethernet/mscc/ocelot_qs.h new file mode 100644 index 000000000000..d18ae726c01d --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_qs.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_QS_H_ +#define _MSCC_OCELOT_QS_H_ + +/* TODO handle BE */ +#define XTR_EOF_0 0x00000080U +#define XTR_EOF_1 0x01000080U +#define XTR_EOF_2 0x02000080U +#define XTR_EOF_3 0x03000080U +#define XTR_PRUNED 0x04000080U +#define XTR_ABORT 0x05000080U +#define XTR_ESCAPE 0x06000080U +#define XTR_NOT_READY 0x07000080U +#define XTR_VALID_BYTES(x) (4 - (((x) >> 24) & 3)) + +#define QS_XTR_GRP_CFG_RSZ 0x4 + +#define QS_XTR_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_XTR_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_XTR_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_XTR_GRP_CFG_STATUS_WORD_POS BIT(1) +#define QS_XTR_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_XTR_RD_RSZ 0x4 + +#define QS_XTR_FRM_PRUNING_RSZ 0x4 + +#define QS_XTR_CFG_DP_WM(x) (((x) << 5) & GENMASK(7, 5)) +#define QS_XTR_CFG_DP_WM_M GENMASK(7, 5) +#define QS_XTR_CFG_DP_WM_X(x) (((x) & GENMASK(7, 5)) >> 5) +#define QS_XTR_CFG_SCH_WM(x) (((x) << 2) & GENMASK(4, 2)) +#define QS_XTR_CFG_SCH_WM_M GENMASK(4, 2) +#define QS_XTR_CFG_SCH_WM_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QS_XTR_CFG_OFLW_ERR_STICKY(x) ((x) & GENMASK(1, 0)) +#define QS_XTR_CFG_OFLW_ERR_STICKY_M GENMASK(1, 0) + +#define QS_INJ_GRP_CFG_RSZ 0x4 + +#define QS_INJ_GRP_CFG_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_GRP_CFG_MODE_M GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) + +#define QS_INJ_WR_RSZ 0x4 + +#define QS_INJ_CTRL_RSZ 0x4 + +#define QS_INJ_CTRL_GAP_SIZE(x) (((x) << 21) & GENMASK(24, 21)) +#define QS_INJ_CTRL_GAP_SIZE_M GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_X(x) (((x) & GENMASK(24, 21)) >> 21) +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_VLD_BYTES(x) (((x) << 16) & GENMASK(17, 16)) +#define QS_INJ_CTRL_VLD_BYTES_M GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_X(x) (((x) & GENMASK(17, 16)) >> 16) + +#define QS_INJ_STATUS_WMARK_REACHED(x) (((x) << 4) & GENMASK(5, 4)) +#define QS_INJ_STATUS_WMARK_REACHED_M GENMASK(5, 4) +#define QS_INJ_STATUS_WMARK_REACHED_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define QS_INJ_STATUS_FIFO_RDY(x) (((x) << 2) & GENMASK(3, 2)) +#define QS_INJ_STATUS_FIFO_RDY_M GENMASK(3, 2) +#define QS_INJ_STATUS_FIFO_RDY_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QS_INJ_STATUS_INJ_IN_PROGRESS(x) ((x) & GENMASK(1, 0)) +#define QS_INJ_STATUS_INJ_IN_PROGRESS_M GENMASK(1, 0) + +#define QS_INJ_ERR_RSZ 0x4 + +#define QS_INJ_ERR_ABORT_ERR_STICKY BIT(1) +#define QS_INJ_ERR_WR_ERR_STICKY BIT(0) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_qsys.h b/drivers/net/ethernet/mscc/ocelot_qsys.h new file mode 100644 index 000000000000..d8c63aa761be --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_qsys.h @@ -0,0 +1,270 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_QSYS_H_ +#define _MSCC_OCELOT_QSYS_H_ + +#define QSYS_PORT_MODE_RSZ 
0x4 + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0) + +#define QSYS_SWITCH_PORT_MODE_RSZ 0x4 + +#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11)) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11) +#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10) +#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1)) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1) +#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0) + +#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5) +#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4) +#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3) +#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2) +#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1) +#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0) + +#define QSYS_EEE_CFG_RSZ 0x4 + +#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8) +#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0)) +#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0) + +#define QSYS_SW_STATUS_RSZ 0x4 + +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8)) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0)) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0) + +#define QSYS_QMAP_GSZ 0x4 + +#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5)) +#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5) +#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5) +#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2)) +#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2) +#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0)) +#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0) + +#define QSYS_ISDX_SGRP_GSZ 0x4 + +#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4 + +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9)) +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9) +#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9) +#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8) +#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7) +#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0)) +#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0) + +#define QSYS_RED_PROFILE_RSZ 0x4 + +#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8) +#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0)) +#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0) + +#define QSYS_RES_CFG_GSZ 0x8 + +#define QSYS_RES_STAT_GSZ 0x8 + +#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12)) +#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12) +#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12) +#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0)) +#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0) + +#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 
2)) +#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2) +#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2) +#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0)) +#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0) + +#define QSYS_QMAXSDU_CFG_0_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_1_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_2_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_3_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_4_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_5_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_6_RSZ 0x4 + +#define QSYS_QMAXSDU_CFG_7_RSZ 0x4 + +#define QSYS_PREEMPTION_CFG_RSZ 0x4 + +#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0)) +#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8)) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8) +#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8) +#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12)) +#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12) +#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16) +#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_CIR_CFG_GSZ 0x80 + +#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6)) +#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6) +#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6) +#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0)) +#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0) + +#define QSYS_EIR_CFG_GSZ 0x80 + +#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7)) +#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7) +#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7) +#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1)) +#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1) +#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1) +#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0) + +#define QSYS_SE_CFG_GSZ 0x80 + +#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6)) +#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6) +#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6) +#define QSYS_SE_CFG_SE_RR_ENA BIT(5) +#define QSYS_SE_CFG_SE_AVB_ENA BIT(4) +#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2)) +#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2) +#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define QSYS_SE_CFG_SE_EXC_ENA BIT(1) +#define QSYS_SE_CFG_SE_EXC_FWD BIT(0) + +#define QSYS_SE_DWRR_CFG_GSZ 0x80 +#define QSYS_SE_DWRR_CFG_RSZ 0x4 + +#define QSYS_SE_CONNECT_GSZ 0x80 + +#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17)) +#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17) +#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17) +#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9)) +#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9) +#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9) +#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5)) +#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5) +#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5) +#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1)) +#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1) +#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1) +#define 
QSYS_SE_CONNECT_SE_TERMINAL BIT(0) + +#define QSYS_SE_DLB_SENSE_GSZ 0x80 + +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11)) +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11) +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7)) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3)) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3) +#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2) +#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1) +#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0) + +#define QSYS_CIR_STATE_GSZ 0x80 + +#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4)) +#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4) +#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4) +#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0)) +#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0) + +#define QSYS_EIR_STATE_GSZ 0x80 + +#define QSYS_SE_STATE_GSZ 0x80 + +#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1)) +#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1) +#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define QSYS_SE_STATE_SE_WAS_YEL BIT(0) + +#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3)) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3) +#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3) +#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2) +#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1) +#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0) + +#define QSYS_TAG_CONFIG_RSZ 0x4 + +#define QSYS_TAG_CONFIG_ENABLE BIT(0) +#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4)) +#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4) +#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8) +#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16)) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16) +#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16) + +#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0)) +#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0) +#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8) +#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16) + +#define QSYS_PORT_MAX_SDU_RSZ 0x4 + +#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16) +#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0)) +#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0) +#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8) +#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) + +#define 
QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0) +#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16)) +#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16) +#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16) + +#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0)) +#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16)) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16) +#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16) +#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24) + +#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0)) +#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8)) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8) +#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8) + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c new file mode 100644 index 000000000000..e334b406c40c --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_regs.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ +#include "ocelot.h" + +static const u32 ocelot_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), 
+ REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; + +static const u32 ocelot_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; + +static const u32 ocelot_hsio_regmap[] = { + REG(HSIO_PLL5G_CFG0, 0x000000), + REG(HSIO_PLL5G_CFG1, 0x000004), + REG(HSIO_PLL5G_CFG2, 0x000008), + REG(HSIO_PLL5G_CFG3, 0x00000c), + REG(HSIO_PLL5G_CFG4, 0x000010), + REG(HSIO_PLL5G_CFG5, 0x000014), + REG(HSIO_PLL5G_CFG6, 0x000018), + REG(HSIO_PLL5G_STATUS0, 0x00001c), + REG(HSIO_PLL5G_STATUS1, 0x000020), + REG(HSIO_PLL5G_BIST_CFG0, 0x000024), + REG(HSIO_PLL5G_BIST_CFG1, 0x000028), + REG(HSIO_PLL5G_BIST_CFG2, 0x00002c), + REG(HSIO_PLL5G_BIST_STAT0, 0x000030), + REG(HSIO_PLL5G_BIST_STAT1, 0x000034), + REG(HSIO_RCOMP_CFG0, 0x000038), + REG(HSIO_RCOMP_STATUS, 0x00003c), + REG(HSIO_SYNC_ETH_CFG, 0x000040), + REG(HSIO_SYNC_ETH_PLL_CFG, 0x000048), + REG(HSIO_S1G_DES_CFG, 0x00004c), + REG(HSIO_S1G_IB_CFG, 0x000050), + REG(HSIO_S1G_OB_CFG, 0x000054), + REG(HSIO_S1G_SER_CFG, 0x000058), + REG(HSIO_S1G_COMMON_CFG, 0x00005c), + REG(HSIO_S1G_PLL_CFG, 0x000060), + REG(HSIO_S1G_PLL_STATUS, 0x000064), + REG(HSIO_S1G_DFT_CFG0, 0x000068), + REG(HSIO_S1G_DFT_CFG1, 0x00006c), + REG(HSIO_S1G_DFT_CFG2, 0x000070), + REG(HSIO_S1G_TP_CFG, 0x000074), + REG(HSIO_S1G_RC_PLL_BIST_CFG, 0x000078), + REG(HSIO_S1G_MISC_CFG, 0x00007c), + REG(HSIO_S1G_DFT_STATUS, 0x000080), + REG(HSIO_S1G_MISC_STATUS, 0x000084), + REG(HSIO_MCB_S1G_ADDR_CFG, 0x000088), + REG(HSIO_S6G_DIG_CFG, 0x00008c), + REG(HSIO_S6G_DFT_CFG0, 0x000090), + REG(HSIO_S6G_DFT_CFG1, 0x000094), + REG(HSIO_S6G_DFT_CFG2, 0x000098), + REG(HSIO_S6G_TP_CFG0, 0x00009c), + REG(HSIO_S6G_TP_CFG1, 0x0000a0), + REG(HSIO_S6G_RC_PLL_BIST_CFG, 0x0000a4), + REG(HSIO_S6G_MISC_CFG, 0x0000a8), + REG(HSIO_S6G_OB_ANEG_CFG, 0x0000ac), + REG(HSIO_S6G_DFT_STATUS, 0x0000b0), + REG(HSIO_S6G_ERR_CNT, 0x0000b4), + REG(HSIO_S6G_MISC_STATUS, 0x0000b8), + REG(HSIO_S6G_DES_CFG, 0x0000bc), + REG(HSIO_S6G_IB_CFG, 0x0000c0), + REG(HSIO_S6G_IB_CFG1, 0x0000c4), + REG(HSIO_S6G_IB_CFG2, 0x0000c8), + REG(HSIO_S6G_IB_CFG3, 0x0000cc), + REG(HSIO_S6G_IB_CFG4, 0x0000d0), + REG(HSIO_S6G_IB_CFG5, 0x0000d4), + REG(HSIO_S6G_OB_CFG, 0x0000d8), + REG(HSIO_S6G_OB_CFG1, 0x0000dc), + REG(HSIO_S6G_SER_CFG, 0x0000e0), + REG(HSIO_S6G_COMMON_CFG, 0x0000e4), + REG(HSIO_S6G_PLL_CFG, 0x0000e8), + REG(HSIO_S6G_ACJTAG_CFG, 0x0000ec), + REG(HSIO_S6G_GP_CFG, 0x0000f0), + REG(HSIO_S6G_IB_STATUS0, 0x0000f4), + REG(HSIO_S6G_IB_STATUS1, 0x0000f8), + 
REG(HSIO_S6G_ACJTAG_STATUS, 0x0000fc), + REG(HSIO_S6G_PLL_STATUS, 0x000100), + REG(HSIO_S6G_REVID, 0x000104), + REG(HSIO_MCB_S6G_ADDR_CFG, 0x000108), + REG(HSIO_HW_CFG, 0x00010c), + REG(HSIO_HW_QSGMII_CFG, 0x000110), + REG(HSIO_HW_QSGMII_STAT, 0x000114), + REG(HSIO_CLK_CFG, 0x000118), + REG(HSIO_TEMP_SENSOR_CTRL, 0x00011c), + REG(HSIO_TEMP_SENSOR_CFG, 0x000120), + REG(HSIO_TEMP_SENSOR_STAT, 0x000124), +}; + +static const u32 ocelot_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; + +static const u32 ocelot_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + REG(REW_DSCP_REMAP_CFG, 0x000790), + REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; + +static const u32 ocelot_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_PAUSE, 0x00003c), + REG(SYS_COUNT_RX_CONTROL, 0x000040), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + 
REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000170), + REG(SYS_RESET_CFG, 0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_CNT, 0x000000), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; + +static const u32 *ocelot_regmap[] = { + [ANA] = ocelot_ana_regmap, + [QS] = ocelot_qs_regmap, + [HSIO] = ocelot_hsio_regmap, + [QSYS] = ocelot_qsys_regmap, + [REW] = ocelot_rew_regmap, + [SYS] = ocelot_sys_regmap, +}; + +static const struct reg_field ocelot_regfields[] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), + [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), + [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9), + [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20), + [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19), 
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), +}; + +static const struct ocelot_stat_layout ocelot_stats_layout[] = { + { .name = "rx_octets", .offset = 0x00, }, + { .name = "rx_unicast", .offset = 0x01, }, + { .name = "rx_multicast", .offset = 0x02, }, + { .name = "rx_broadcast", .offset = 0x03, }, + { .name = "rx_shorts", .offset = 0x04, }, + { .name = "rx_fragments", .offset = 0x05, }, + { .name = "rx_jabbers", .offset = 0x06, }, + { .name = "rx_crc_align_errs", .offset = 0x07, }, + { .name = "rx_sym_errs", .offset = 0x08, }, + { .name = "rx_frames_below_65_octets", .offset = 0x09, }, + { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, }, + { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, }, + { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, }, + { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, }, + { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, }, + { .name = "rx_frames_over_1526_octets", .offset = 0x0F, }, + { .name = "rx_pause", .offset = 0x10, }, + { .name = "rx_control", .offset = 0x11, }, + { .name = "rx_longs", .offset = 0x12, }, + { .name = "rx_classified_drops", .offset = 0x13, }, + { .name = "rx_red_prio_0", .offset = 0x14, }, + { .name = "rx_red_prio_1", .offset = 0x15, }, + { .name = "rx_red_prio_2", .offset = 0x16, }, + { .name = "rx_red_prio_3", .offset = 0x17, }, + { .name = "rx_red_prio_4", .offset = 0x18, }, + { .name = "rx_red_prio_5", .offset = 0x19, }, + { .name = "rx_red_prio_6", .offset = 0x1A, }, + { .name = "rx_red_prio_7", .offset = 0x1B, }, + { .name = "rx_yellow_prio_0", .offset = 0x1C, }, + { .name = "rx_yellow_prio_1", .offset = 0x1D, }, + { .name = "rx_yellow_prio_2", .offset = 0x1E, }, + { .name = "rx_yellow_prio_3", .offset = 0x1F, }, + { .name = "rx_yellow_prio_4", .offset = 0x20, }, + { .name = "rx_yellow_prio_5", .offset = 0x21, }, + { .name = "rx_yellow_prio_6", .offset = 0x22, }, + { .name = "rx_yellow_prio_7", .offset = 0x23, }, + { .name = "rx_green_prio_0", .offset = 0x24, }, + { .name = "rx_green_prio_1", .offset = 0x25, }, + { .name = "rx_green_prio_2", .offset = 0x26, }, + { .name = "rx_green_prio_3", .offset = 0x27, }, + { .name = "rx_green_prio_4", .offset = 0x28, }, + { .name = "rx_green_prio_5", .offset = 0x29, }, + { .name = "rx_green_prio_6", .offset = 0x2A, }, + { .name = "rx_green_prio_7", .offset = 0x2B, }, + { .name = "tx_octets", .offset = 0x40, }, + { .name = "tx_unicast", .offset = 0x41, }, + { .name = "tx_multicast", .offset = 0x42, }, + { .name = "tx_broadcast", .offset = 0x43, }, + { .name = "tx_collision", .offset = 0x44, }, + { .name = "tx_drops", .offset = 0x45, }, + { .name = "tx_pause", .offset = 0x46, }, + { .name = "tx_frames_below_65_octets", .offset = 0x47, }, + { .name = "tx_frames_65_to_127_octets", .offset = 0x48, }, + { .name = "tx_frames_128_255_octets", .offset = 0x49, }, + { .name = "tx_frames_256_511_octets", .offset = 0x4A, }, + { .name = "tx_frames_512_1023_octets", .offset = 0x4B, }, + { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, }, + { .name = "tx_frames_over_1526_octets", .offset = 0x4D, }, + { .name = "tx_yellow_prio_0", .offset = 0x4E, }, + { .name = "tx_yellow_prio_1", .offset = 
0x4F, }, + { .name = "tx_yellow_prio_2", .offset = 0x50, }, + { .name = "tx_yellow_prio_3", .offset = 0x51, }, + { .name = "tx_yellow_prio_4", .offset = 0x52, }, + { .name = "tx_yellow_prio_5", .offset = 0x53, }, + { .name = "tx_yellow_prio_6", .offset = 0x54, }, + { .name = "tx_yellow_prio_7", .offset = 0x55, }, + { .name = "tx_green_prio_0", .offset = 0x56, }, + { .name = "tx_green_prio_1", .offset = 0x57, }, + { .name = "tx_green_prio_2", .offset = 0x58, }, + { .name = "tx_green_prio_3", .offset = 0x59, }, + { .name = "tx_green_prio_4", .offset = 0x5A, }, + { .name = "tx_green_prio_5", .offset = 0x5B, }, + { .name = "tx_green_prio_6", .offset = 0x5C, }, + { .name = "tx_green_prio_7", .offset = 0x5D, }, + { .name = "tx_aged", .offset = 0x5E, }, + { .name = "drop_local", .offset = 0x80, }, + { .name = "drop_tail", .offset = 0x81, }, + { .name = "drop_yellow_prio_0", .offset = 0x82, }, + { .name = "drop_yellow_prio_1", .offset = 0x83, }, + { .name = "drop_yellow_prio_2", .offset = 0x84, }, + { .name = "drop_yellow_prio_3", .offset = 0x85, }, + { .name = "drop_yellow_prio_4", .offset = 0x86, }, + { .name = "drop_yellow_prio_5", .offset = 0x87, }, + { .name = "drop_yellow_prio_6", .offset = 0x88, }, + { .name = "drop_yellow_prio_7", .offset = 0x89, }, + { .name = "drop_green_prio_0", .offset = 0x8A, }, + { .name = "drop_green_prio_1", .offset = 0x8B, }, + { .name = "drop_green_prio_2", .offset = 0x8C, }, + { .name = "drop_green_prio_3", .offset = 0x8D, }, + { .name = "drop_green_prio_4", .offset = 0x8E, }, + { .name = "drop_green_prio_5", .offset = 0x8F, }, + { .name = "drop_green_prio_6", .offset = 0x90, }, + { .name = "drop_green_prio_7", .offset = 0x91, }, +}; + +static void ocelot_pll5_init(struct ocelot *ocelot) +{ + /* Configure PLL5. This will need a proper CCF driver + * The values are coming from the VTSS API for Ocelot + */ + ocelot_write(ocelot, HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8), HSIO_PLL5G_CFG4); + ocelot_write(ocelot, HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | + HSIO_PLL5G_CFG0_ENA_BIAS | + HSIO_PLL5G_CFG0_ENA_VCO_BUF | + HSIO_PLL5G_CFG0_ENA_CP1 | + HSIO_PLL5G_CFG0_SELCPI(2) | + HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) | + HSIO_PLL5G_CFG0_SELBGV820(4) | + HSIO_PLL5G_CFG0_DIV4 | + HSIO_PLL5G_CFG0_ENA_CLKTREE | + HSIO_PLL5G_CFG0_ENA_LANE, HSIO_PLL5G_CFG0); + ocelot_write(ocelot, HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | + HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | + HSIO_PLL5G_CFG2_ENA_AMPCTRL | + HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | + HSIO_PLL5G_CFG2_AMPC_SEL(0x10), HSIO_PLL5G_CFG2); +} + +int ocelot_chip_init(struct ocelot *ocelot) +{ + int ret; + + ocelot->map = ocelot_regmap; + ocelot->stats_layout = ocelot_stats_layout; + ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); + ocelot->shared_queue_sz = 224 * 1024; + + ret = ocelot_regfields_init(ocelot, ocelot_regfields); + if (ret) + return ret; + + ocelot_pll5_init(ocelot); + + eth_random_addr(ocelot->base_mac); + ocelot->base_mac[5] &= 0xf0; + + return 0; +} +EXPORT_SYMBOL(ocelot_chip_init); diff --git a/drivers/net/ethernet/mscc/ocelot_rew.h b/drivers/net/ethernet/mscc/ocelot_rew.h new file mode 100644 index 000000000000..210914b7e20f --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_rew.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_REW_H_ +#define _MSCC_OCELOT_REW_H_ + +#define REW_PORT_VLAN_CFG_GSZ 0x80 + 
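For orientation, here is a hedged sketch of how a platform probe might consume the two entry points defined in ocelot_io.c and ocelot_regs.c above: ocelot_io_platform_init() once per register target, then ocelot_chip_init() after all regmaps are wired up. The probe function itself, the "sys" resource name, and the error handling are illustrative assumptions, not part of this patch.

/* Sketch only: assumes "ocelot.h" from this patch is in scope */
static int ocelot_probe_sketch(struct platform_device *pdev)
{
	struct ocelot *ocelot;
	struct regmap *map;

	ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
	if (!ocelot)
		return -ENOMEM;
	ocelot->dev = &pdev->dev;

	/* One named regmap per switch target; SYS shown here, the
	 * ANA/QS/QSYS/REW/HSIO targets would follow the same pattern.
	 */
	map = ocelot_io_platform_init(ocelot, pdev, "sys");
	if (IS_ERR(map))
		return PTR_ERR(map);
	ocelot->targets[SYS] = map;

	/* Regfields, PLL5 and the random base MAC in one call */
	return ocelot_chip_init(ocelot);
}

Once ocelot_chip_init() succeeds, callers address registers as (target, offset) pairs, exactly as ocelot_pll5_init() does with ocelot_write() above, without knowing any per-target base address.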
+#define REW_PORT_VLAN_CFG_PORT_TPID(x) (((x) << 16) & GENMASK(31, 16)) +#define REW_PORT_VLAN_CFG_PORT_TPID_M GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15) +#define REW_PORT_VLAN_CFG_PORT_PCP(x) (((x) << 12) & GENMASK(14, 12)) +#define REW_PORT_VLAN_CFG_PORT_PCP_M GENMASK(14, 12) +#define REW_PORT_VLAN_CFG_PORT_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12) +#define REW_PORT_VLAN_CFG_PORT_VID(x) ((x) & GENMASK(11, 0)) +#define REW_PORT_VLAN_CFG_PORT_VID_M GENMASK(11, 0) + +#define REW_TAG_CFG_GSZ 0x80 + +#define REW_TAG_CFG_TAG_CFG(x) (((x) << 7) & GENMASK(8, 7)) +#define REW_TAG_CFG_TAG_CFG_M GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_X(x) (((x) & GENMASK(8, 7)) >> 7) +#define REW_TAG_CFG_TAG_TPID_CFG(x) (((x) << 5) & GENMASK(6, 5)) +#define REW_TAG_CFG_TAG_TPID_CFG_M GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_X(x) (((x) & GENMASK(6, 5)) >> 5) +#define REW_TAG_CFG_TAG_VID_CFG BIT(4) +#define REW_TAG_CFG_TAG_PCP_CFG(x) (((x) << 2) & GENMASK(3, 2)) +#define REW_TAG_CFG_TAG_PCP_CFG_M GENMASK(3, 2) +#define REW_TAG_CFG_TAG_PCP_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2) +#define REW_TAG_CFG_TAG_DEI_CFG(x) ((x) & GENMASK(1, 0)) +#define REW_TAG_CFG_TAG_DEI_CFG_M GENMASK(1, 0) + +#define REW_PORT_CFG_GSZ 0x80 + +#define REW_PORT_CFG_ES0_EN BIT(5) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG(x) (((x) << 3) & GENMASK(4, 3)) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_M GENMASK(4, 3) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_X(x) (((x) & GENMASK(4, 3)) >> 3) +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(2) +#define REW_PORT_CFG_FLUSH_ENA BIT(1) +#define REW_PORT_CFG_AGE_DIS BIT(0) + +#define REW_DSCP_CFG_GSZ 0x80 + +#define REW_PCP_DEI_QOS_MAP_CFG_GSZ 0x80 +#define REW_PCP_DEI_QOS_MAP_CFG_RSZ 0x4 + +#define REW_PCP_DEI_QOS_MAP_CFG_DEI_QOS_VAL BIT(3) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL(x) ((x) & GENMASK(2, 0)) +#define REW_PCP_DEI_QOS_MAP_CFG_PCP_QOS_VAL_M GENMASK(2, 0) + +#define REW_PTP_CFG_GSZ 0x80 + +#define REW_PTP_CFG_PTP_BACKPLANE_MODE BIT(7) +#define REW_PTP_CFG_GP_CFG_UNUSED(x) (((x) << 3) & GENMASK(6, 3)) +#define REW_PTP_CFG_GP_CFG_UNUSED_M GENMASK(6, 3) +#define REW_PTP_CFG_GP_CFG_UNUSED_X(x) (((x) & GENMASK(6, 3)) >> 3) +#define REW_PTP_CFG_PTP_1STEP_DIS BIT(2) +#define REW_PTP_CFG_PTP_2STEP_DIS BIT(1) +#define REW_PTP_CFG_PTP_UDP_KEEP BIT(0) + +#define REW_PTP_DLY1_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_GSZ 0x80 + +#define REW_RED_TAG_CFG_RED_TAG_CFG BIT(0) + +#define REW_DSCP_REMAP_DP1_CFG_RSZ 0x4 + +#define REW_DSCP_REMAP_CFG_RSZ 0x4 + +#define REW_REW_STICKY_ES0_TAGB_PUSH_FAILED BIT(0) + +#define REW_PPT_RSZ 0x4 + +#endif diff --git a/drivers/net/ethernet/mscc/ocelot_sys.h b/drivers/net/ethernet/mscc/ocelot_sys.h new file mode 100644 index 000000000000..16f91e172bcb --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_sys.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * Copyright (c) 2017 Microsemi Corporation + */ + +#ifndef _MSCC_OCELOT_SYS_H_ +#define _MSCC_OCELOT_SYS_H_ + +#define SYS_COUNT_RX_OCTETS_RSZ 0x4 + +#define SYS_COUNT_TX_OCTETS_RSZ 0x4 + +#define SYS_PORT_MODE_RSZ 0x4 + +#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5)) +#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5) +#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5) +#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3)) +#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3) +#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) 
(((x) & GENMASK(4, 3)) >> 3) +#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1)) +#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1) +#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1) +#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0) + +#define SYS_FRONT_PORT_MODE_RSZ 0x4 + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0)) +#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0) + +#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10)) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10) +#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0)) +#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0) + +#define SYS_SW_STATUS_RSZ 0x4 + +#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0) + +#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1) +#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0) + +#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4 + +#define SYS_REW_MAC_LOW_CFG_RSZ 0x4 + +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6)) +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6) +#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6) +#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0)) +#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0) + +#define SYS_PAUSE_CFG_RSZ 0x4 + +#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10)) +#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10) +#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1)) +#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1) +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) + +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9)) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0)) +#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0) + +#define SYS_ATOP_RSZ 0x4 + +#define SYS_MAC_FC_CFG_RSZ 0x4 + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26)) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20)) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0)) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0) + +#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16)) +#define SYS_MMGT_RELCNT_M GENMASK(31, 16) +#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16) +#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0)) +#define SYS_MMGT_FREECNT_M GENMASK(15, 0) + +#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4)) +#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4) +#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4) +#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0)) +#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0) + +#define SYS_EVENTS_DIF_RSZ 0x4 + +#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & 
GENMASK(8, 6)) +#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6) +#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6) +#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0)) +#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0) + +#define SYS_EVENTS_CORE_EV_FWR BIT(2) +#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0)) +#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0) + +#define SYS_CNT_GSZ 0x4 + +#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29) +#define SYS_PTP_STATUS_PTP_OVFL BIT(28) +#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27) +#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21)) +#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21) +#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16)) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16) +#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16) +#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0)) +#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0) + +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0)) +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0) +#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31) + +#define SYS_PTP_NXT_PTP_NXT BIT(0) + +#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2)) +#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2) +#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2) +#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0)) +#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0) + +#endif diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 6223930a8155..c60da9e8bf14 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -693,7 +693,7 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id) VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) return VXGE_HW_OK; else - return VXGE_HW_ERR_PRIVILAGED_OPEARATION; + return VXGE_HW_ERR_PRIVILEGED_OPERATION; } /* @@ -1920,7 +1920,7 @@ enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev, } if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; goto exit; } @@ -3153,7 +3153,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_mrpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { @@ -3165,7 +3165,7 @@ vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_srpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { @@ -3279,7 +3279,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_mrpcim: if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) { @@ -3291,7 +3291,7 @@ vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev, case vxge_hw_mgmt_reg_type_srpcim: 
if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) { - status = VXGE_HW_ERR_PRIVILAGED_OPEARATION; + status = VXGE_HW_ERR_PRIVILEGED_OPERATION; break; } if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h index cfa970417f81..d743a37a3cee 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h @@ -127,7 +127,7 @@ enum vxge_hw_status { VXGE_HW_ERR_INVALID_TCODE = VXGE_HW_BASE_ERR + 14, VXGE_HW_ERR_INVALID_BLOCK_SIZE = VXGE_HW_BASE_ERR + 15, VXGE_HW_ERR_INVALID_STATE = VXGE_HW_BASE_ERR + 16, - VXGE_HW_ERR_PRIVILAGED_OPEARATION = VXGE_HW_BASE_ERR + 17, + VXGE_HW_ERR_PRIVILEGED_OPERATION = VXGE_HW_BASE_ERR + 17, VXGE_HW_ERR_INVALID_PORT = VXGE_HW_BASE_ERR + 18, VXGE_HW_ERR_FIFO = VXGE_HW_BASE_ERR + 19, VXGE_HW_ERR_VPATH = VXGE_HW_BASE_ERR + 20, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c index 0452848d1316..03c3d1230c17 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c @@ -276,7 +276,7 @@ static void vxge_get_ethtool_stats(struct net_device *dev, *ptr++ = 0; status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats); if (status != VXGE_HW_OK) { - if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) { + if (status != VXGE_HW_ERR_PRIVILEGED_OPERATION) { vxge_debug_init(VXGE_ERR, "%s : %d Failure in getting xmac stats", __func__, __LINE__); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index b2299f2b2155..a8918bb7c802 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3484,11 +3484,11 @@ static int vxge_device_register(struct __vxge_hw_device *hldev, 0, &stat); - if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION) + if (status == VXGE_HW_ERR_PRIVILEGED_OPERATION) vxge_debug_init( vxge_hw_device_trace_level_get(hldev), "%s: device stats clear returns" - "VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name); + "VXGE_HW_ERR_PRIVILEGED_OPERATION", ndev->name); vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev), "%s: %s:%d Exiting...", diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index ae0c46ba7546..66f15b05b65e 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -36,6 +36,19 @@ config NFP_APP_FLOWER either directly, with Open vSwitch, or any other way. Note that TC Flower offload requires specific FW to work. +config NFP_APP_ABM_NIC + bool "NFP4000/NFP6000 Advanced buffer management NIC support" + depends on NFP + depends on NET_SWITCHDEV + default y + help + Enable driver support for Advanced buffer management NIC on NFP. + ABM NIC allows advanced configuration of queuing and scheduling + of packets, including ECN marking. Say Y, if you are planning to + use one of the NFP4000 and NFP6000 platforms which support this + functionality. + Code will be built into the nfp.ko driver. 
+ config NFP_DEBUG bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers" depends on NFP diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index d5866d708dfa..4afb10375397 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -30,12 +30,14 @@ nfp-objs := \ nfp_net_sriov.o \ nfp_netvf_main.o \ nfp_port.o \ + nfp_shared_buf.o \ nic/main.o ifeq ($(CONFIG_NFP_APP_FLOWER),y) nfp-objs += \ flower/action.o \ flower/cmsg.o \ + flower/lag_conf.o \ flower/main.o \ flower/match.o \ flower/metadata.o \ @@ -52,4 +54,10 @@ nfp-objs += \ bpf/jit.o endif +ifeq ($(CONFIG_NFP_APP_ABM_NIC),y) +nfp-objs += \ + abm/ctrl.o \ + abm/main.o +endif + nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c new file mode 100644 index 000000000000..b157ccd8c80f --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/kernel.h> + +#include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nffw.h" +#include "../nfp_app.h" +#include "../nfp_abi.h" +#include "../nfp_main.h" +#include "../nfp_net.h" +#include "main.h" + +#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u" +#define NFP_QLVL_STRIDE 16 +#define NFP_QLVL_BLOG_BYTES 0 +#define NFP_QLVL_BLOG_PKTS 4 +#define NFP_QLVL_THRS 8 + +#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats" +#define NFP_QMSTAT_STRIDE 32 +#define NFP_QMSTAT_NON_STO 0 +#define NFP_QMSTAT_STO 8 +#define NFP_QMSTAT_DROP 16 +#define NFP_QMSTAT_ECN 24 + +static unsigned long long +nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) +{ + return alink->abm->q_lvls->addr + + (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; +} + +static int +nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, unsigned int i, + bool is_u64, u64 *res) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 val32, mur; + u64 val, addr; + int err; + + mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); + + addr = sym->addr + (alink->queue_base + i) * stride + offset; + if (is_u64) + err = nfp_cpp_readq(cpp, mur, addr, &val); + else + err = nfp_cpp_readl(cpp, mur, addr, &val32); + if (err) { + nfp_err(cpp, + "RED offload reading stat failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + *res = is_u64 ? val : val32; + return 0; +} + +static int +nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, + unsigned int stride, unsigned int offset, bool is_u64, + u64 *res) +{ + u64 val, sum = 0; + unsigned int i; + int err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i, + is_u64, &val); + if (err) + return err; + sum += val; + } + + *res = sum; + return 0; +} + +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + u32 muw; + int err; + + muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, + alink->abm->q_lvls->domain); + + err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + if (err) { + nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", + alink->id, i); + return err; + } + + return 0; +} + +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) +{ + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + err = nfp_abm_ctrl_set_q_lvl(alink, i, val); + if (err) + return err; + } + + return 0; +} + +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_NON_STO, i, true, &val)) + return 0; + return val; +} + +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i) +{ + u64 val; + + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, + NFP_QMSTAT_STO, i, true, &val)) + return 0; + return val; +} + +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats) +{ + int err; + + stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + i, false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + i, false, 
&stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &stats->overlimits); +} + +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats) +{ + u64 pkts = 0, bytes = 0; + int i, err; + + for (i = 0; i < alink->vnic->max_rx_rings; i++) { + pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); + bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + } + stats->tx_pkts = pkts; + stats->tx_bytes = bytes; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, + false, &stats->backlog_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, + NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, + false, &stats->backlog_pkts); + if (err) + return err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + true, &stats->drops); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &stats->overlimits); +} + +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + i, true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + i, true, &xstats->ecn_marked); +} + +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats) +{ + int err; + + err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, + true, &xstats->pdrop); + if (err) + return err; + + return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, + true, &xstats->ecn_marked); +} + +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE, + NULL, 0, NULL, 0); +} + +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) +{ + return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_DISABLE, + NULL, 0, NULL, 0); +} + +void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) +{ + alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ); + alink->queue_base /= alink->vnic->stride_rx; +} + +static const struct nfp_rtsym * +nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) +{ + const struct nfp_rtsym *sym; + + sym = nfp_rtsym_lookup(pf->rtbl, name); + if (!sym) { + nfp_err(pf->cpp, "Symbol '%s' not found\n", name); + return ERR_PTR(-ENOENT); + } + if (sym->size != size) { + nfp_err(pf->cpp, + "Symbol '%s' wrong size: expected %u got %llu\n", + name, size, sym->size); + return ERR_PTR(-EINVAL); + } + + return sym; +} + +static const struct nfp_rtsym * +nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name, + unsigned int size) +{ + return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS); +} + +int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) +{ + struct nfp_pf *pf = abm->app->pf; + const struct nfp_rtsym *sym; + unsigned int pf_id; + char pf_symbol[64]; + + pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = pf_id; + + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, 
pf_symbol, NFP_QLVL_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->q_lvls = sym; + + snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id); + sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->qm_stats = sym; + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c new file mode 100644 index 000000000000..1561c2724c26 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
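nfp_abm_ctrl_find_addrs() above resolves two per-queue firmware symbols (queue levels and queue-manager stats), and every accessor in ctrl.c then addresses an entry field as base + (queue_base + queue) * stride + offset. A simplified userspace model of that addressing; the struct, the base address and the queue numbers here are illustrative, not the firmware ABI:

    #include <stdint.h>
    #include <stdio.h>

    #define QLVL_STRIDE 16 /* bytes per queue entry, as NFP_QLVL_STRIDE */
    #define QLVL_THRS   8  /* offset of the threshold word, as NFP_QLVL_THRS */

    struct rtsym_model {   /* stand-in for struct nfp_rtsym */
        uint64_t addr;     /* base address of the symbol */
    };

    /* mirror of nfp_abm_q_lvl_thrs(): address of a queue's threshold */
    static uint64_t q_lvl_thrs(const struct rtsym_model *sym,
                               unsigned int queue_base, unsigned int queue)
    {
        return sym->addr + (queue_base + queue) * QLVL_STRIDE + QLVL_THRS;
    }

    int main(void)
    {
        struct rtsym_model q_lvls = { .addr = 0x1000 }; /* hypothetical */

        /* vNIC whose queues start at 8; slot for its queue 2 */
        printf("0x%llx\n",
               (unsigned long long)q_lvl_thrs(&q_lvls, 8, 2)); /* 0x10a8 */
        return 0;
    }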
+ */ + +#include <linux/bitfield.h> +#include <linux/etherdevice.h> +#include <linux/lockdep.h> +#include <linux/netdevice.h> +#include <linux/rcupdate.h> +#include <linux/slab.h> +#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/red.h> + +#include "../nfpcore/nfp.h" +#include "../nfpcore/nfp_cpp.h" +#include "../nfpcore/nfp_nsp.h" +#include "../nfp_app.h" +#include "../nfp_main.h" +#include "../nfp_net.h" +#include "../nfp_net_repr.h" +#include "../nfp_port.h" +#include "main.h" + +static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) +{ + return FIELD_PREP(NFP_ABM_PORTID_TYPE, rtype) | + FIELD_PREP(NFP_ABM_PORTID_ID, id); +} + +static int +__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs, u32 init_val) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + int ret; + + ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); + memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); + + alink->parent = handle; + alink->num_qdiscs = qs; + port->tc_offload_cnt = qs; + + return ret; +} + +static void +nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle, unsigned int qs) +{ + __nfp_abm_reset_root(netdev, alink, handle, qs, ~0); +} + +static int +nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + unsigned int i = TC_H_MIN(opt->parent) - 1; + + if (opt->parent == TC_H_ROOT) + i = 0; + else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) + i = TC_H_MIN(opt->parent) - 1; + else + return -EOPNOTSUPP; + + if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) + return -EOPNOTSUPP; + + return i; +} + +static void +nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle) +{ + unsigned int i; + + for (i = 0; i < alink->num_qdiscs; i++) + if (handle == alink->qdiscs[i].handle) + break; + if (i == alink->num_qdiscs) + return; + + if (alink->parent == TC_H_ROOT) { + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + } else { + nfp_abm_ctrl_set_q_lvl(alink, i, ~0); + memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); + } +} + +static int +nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + bool existing; + int i, err; + + i = nfp_abm_red_find(alink, opt); + existing = i >= 0; + + if (opt->set.min != opt->set.max || !opt->set.is_ecn) { + nfp_warn(alink->abm->app->cpp, + "RED offload failed - unsupported parameters\n"); + err = -EINVAL; + goto err_destroy; + } + + if (existing) { + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); + else + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + if (err) + goto err_destroy; + return 0; + } + + if (opt->parent == TC_H_ROOT) { + i = 0; + err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, + opt->set.min); + } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { + i = TC_H_MIN(opt->parent) - 1; + err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); + } else { + return -EINVAL; + } + /* Set the handle to try full clean up, in case IO failed */ + alink->qdiscs[i].handle = opt->handle; + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, + &alink->qdiscs[i].stats); + if (err) + goto err_destroy; + + if (opt->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, + &alink->qdiscs[i].xstats); + 
else + err = nfp_abm_ctrl_read_q_xstats(alink, i, + &alink->qdiscs[i].xstats); + if (err) + goto err_destroy; + + alink->qdiscs[i].stats.backlog_pkts = 0; + alink->qdiscs[i].stats.backlog_bytes = 0; + + return 0; +err_destroy: + /* If the qdisc keeps on living, but we can't offload undo changes */ + if (existing) { + opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts; + opt->set.qstats->backlog -= + alink->qdiscs[i].stats.backlog_bytes; + } + nfp_abm_red_destroy(netdev, alink, opt->handle); + + return err; +} + +static void +nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, + struct tc_qopt_offload_stats *stats) +{ + _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, + new->tx_pkts - old->tx_pkts); + stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; + stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; + stats->qstats->overlimits += new->overlimits - old->overlimits; + stats->qstats->drops += new->drops - old->drops; +} + +static int +nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_stats *prev_stats; + struct nfp_alink_stats stats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_stats = &alink->qdiscs[i].stats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_stats(alink, &stats); + else + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, prev_stats, &opt->stats); + + *prev_stats = stats; + + return 0; +} + +static int +nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_alink_xstats *prev_xstats; + struct nfp_alink_xstats xstats; + int i, err; + + i = nfp_abm_red_find(alink, opt); + if (i < 0) + return i; + prev_xstats = &alink->qdiscs[i].xstats; + + if (alink->parent == TC_H_ROOT) + err = nfp_abm_ctrl_read_xstats(alink, &xstats); + else + err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); + if (err) + return err; + + opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; + opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; + + *prev_xstats = xstats; + + return 0; +} + +static int +nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + switch (opt->command) { + case TC_RED_REPLACE: + return nfp_abm_red_replace(netdev, alink, opt); + case TC_RED_DESTROY: + nfp_abm_red_destroy(netdev, alink, opt->handle); + return 0; + case TC_RED_STATS: + return nfp_abm_red_stats(alink, opt); + case TC_RED_XSTATS: + return nfp_abm_red_xstats(alink, opt); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) +{ + struct nfp_alink_stats stats; + unsigned int i; + int err; + + for (i = 0; i < alink->num_qdiscs; i++) { + if (alink->qdiscs[i].handle == TC_H_UNSPEC) + continue; + + err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); + if (err) + return err; + + nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, + &opt->stats); + } + + return 0; +} + +static int +nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + switch (opt->command) { + case TC_MQ_CREATE: + nfp_abm_reset_root(netdev, alink, opt->handle, + alink->total_queues); + return 0; + case TC_MQ_DESTROY: + if (opt->handle == alink->parent) + nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); + return 0; + case TC_MQ_STATS: + return nfp_abm_mq_stats(alink, 
opt); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, void *type_data) +{ + struct nfp_repr *repr = netdev_priv(netdev); + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!port || port->type != NFP_PORT_PF_PORT) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_QDISC_MQ: + return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data); + case TC_SETUP_QDISC_RED: + return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id) +{ + enum nfp_repr_type rtype; + struct nfp_reprs *reprs; + u8 port; + + rtype = FIELD_GET(NFP_ABM_PORTID_TYPE, port_id); + port = FIELD_GET(NFP_ABM_PORTID_ID, port_id); + + reprs = rcu_dereference(app->reprs[rtype]); + if (!reprs) + return NULL; + + if (port >= reprs->num_reprs) + return NULL; + + return rcu_dereference(reprs->reprs[port]); +} + +static int +nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, + enum nfp_port_type ptype) +{ + struct net_device *netdev; + enum nfp_repr_type rtype; + struct nfp_reprs *reprs; + struct nfp_repr *repr; + struct nfp_port *port; + unsigned int txqs; + int err; + + if (ptype == NFP_PORT_PHYS_PORT) { + rtype = NFP_REPR_TYPE_PHYS_PORT; + txqs = 1; + } else { + rtype = NFP_REPR_TYPE_PF; + txqs = alink->vnic->max_rx_rings; + } + + netdev = nfp_repr_alloc_mqs(app, txqs, 1); + if (!netdev) + return -ENOMEM; + repr = netdev_priv(netdev); + repr->app_priv = alink; + + port = nfp_port_alloc(app, ptype, netdev); + if (IS_ERR(port)) { + err = PTR_ERR(port); + goto err_free_repr; + } + + if (ptype == NFP_PORT_PHYS_PORT) { + port->eth_forced = true; + err = nfp_port_init_phy_port(app->pf, app, port, alink->id); + if (err) + goto err_free_port; + } else { + port->pf_id = alink->abm->pf_id; + port->pf_split = app->pf->max_data_vnics > 1; + port->pf_split_id = alink->id; + port->vnic = alink->vnic->dp.ctrl_bar; + } + + SET_NETDEV_DEV(netdev, &alink->vnic->pdev->dev); + eth_hw_addr_random(netdev); + + err = nfp_repr_init(app, netdev, nfp_abm_portid(rtype, alink->id), + port, alink->vnic->dp.netdev); + if (err) + goto err_free_port; + + reprs = nfp_reprs_get_locked(app, rtype); + WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr"); + rcu_assign_pointer(reprs->reprs[alink->id], netdev); + + nfp_info(app->cpp, "%s Port %d Representor(%s) created\n", + ptype == NFP_PORT_PF_PORT ? 
"PCIe" : "Phys", + alink->id, netdev->name); + + return 0; + +err_free_port: + nfp_port_free(port); +err_free_repr: + nfp_repr_free(netdev); + return err; +} + +static void +nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink, + enum nfp_repr_type rtype) +{ + struct net_device *netdev; + struct nfp_reprs *reprs; + + reprs = nfp_reprs_get_locked(app, rtype); + netdev = nfp_repr_get_locked(app, reprs, alink->id); + if (!netdev) + return; + rcu_assign_pointer(reprs->reprs[alink->id], NULL); + synchronize_rcu(); + /* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */ + nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev)); +} + +static void +nfp_abm_kill_reprs(struct nfp_abm *abm, struct nfp_abm_link *alink) +{ + nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PF); + nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PHYS_PORT); +} + +static void nfp_abm_kill_reprs_all(struct nfp_abm *abm) +{ + struct nfp_pf *pf = abm->app->pf; + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) + nfp_abm_kill_reprs(abm, (struct nfp_abm_link *)nn->app_priv); +} + +static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app) +{ + struct nfp_abm *abm = app->priv; + + return abm->eswitch_mode; +} + +static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm) +{ + nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); + + abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + return 0; +} + +static void nfp_abm_eswitch_clean_up(struct nfp_abm *abm) +{ + if (abm->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY) + WARN_ON(nfp_abm_eswitch_set_legacy(abm)); +} + +static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) +{ + struct nfp_app *app = abm->app; + struct nfp_pf *pf = app->pf; + struct nfp_net *nn; + int err; + + err = nfp_abm_ctrl_qm_enable(abm); + if (err) + return err; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + struct nfp_abm_link *alink = nn->app_priv; + + err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PHYS_PORT); + if (err) + goto err_kill_all_reprs; + + err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PF_PORT); + if (err) + goto err_kill_all_reprs; + } + + abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + return 0; + +err_kill_all_reprs: + nfp_abm_kill_reprs_all(abm); + nfp_abm_ctrl_qm_disable(abm); + return err; +} + +static int nfp_abm_eswitch_mode_set(struct nfp_app *app, u16 mode) +{ + struct nfp_abm *abm = app->priv; + + if (abm->eswitch_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + return nfp_abm_eswitch_set_legacy(abm); + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + return nfp_abm_eswitch_set_switchdev(abm); + default: + return -EINVAL; + } +} + +static void +nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, + unsigned int id) +{ + struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id]; + u8 mac_addr[ETH_ALEN]; + const char *mac_str; + char name[32]; + + if (id > pf->eth_tbl->count) { + nfp_warn(pf->cpp, "No entry for persistent MAC address\n"); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + snprintf(name, sizeof(name), "eth%u.mac.pf%u", + eth_port->eth_index, abm->pf_id); + + mac_str = nfp_hwinfo_lookup(pf->hwinfo, name); + if (!mac_str) { + nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n", + name); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + &mac_addr[0], &mac_addr[1], &mac_addr[2], + &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { + 
nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n", + mac_str); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); + ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); +} + +static int +nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) +{ + struct nfp_eth_table_port *eth_port = &app->pf->eth_tbl->ports[id]; + struct nfp_abm *abm = app->priv; + struct nfp_abm_link *alink; + int err; + + alink = kzalloc(sizeof(*alink), GFP_KERNEL); + if (!alink) + return -ENOMEM; + nn->app_priv = alink; + alink->abm = abm; + alink->vnic = nn; + alink->id = id; + alink->parent = TC_H_ROOT; + alink->total_queues = alink->vnic->max_rx_rings; + alink->qdiscs = kvzalloc(sizeof(*alink->qdiscs) * alink->total_queues, + GFP_KERNEL); + if (!alink->qdiscs) { + err = -ENOMEM; + goto err_free_alink; + } + + /* This is a multi-host app, make sure MAC/PHY is up, but don't + * make the MAC/PHY state follow the state of any of the ports. + */ + err = nfp_eth_set_configured(app->cpp, eth_port->index, true); + if (err < 0) + goto err_free_qdiscs; + + netif_keep_dst(nn->dp.netdev); + + nfp_abm_vnic_set_mac(app->pf, abm, nn, id); + nfp_abm_ctrl_read_params(alink); + + return 0; + +err_free_qdiscs: + kvfree(alink->qdiscs); +err_free_alink: + kfree(alink); + return err; +} + +static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_abm_link *alink = nn->app_priv; + + nfp_abm_kill_reprs(alink->abm, alink); + kvfree(alink->qdiscs); + kfree(alink); +} + +static u64 * +nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + *data++ = nfp_abm_ctrl_stat_non_sto(alink, i); + *data++ = nfp_abm_ctrl_stat_sto(alink, i); + } + return data; +} + +static int +nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + + if (port->type != NFP_PORT_PF_PORT) + return 0; + alink = repr->app_priv; + return alink->vnic->dp.num_r_vecs * 2; +} + +static u8 * +nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, + u8 *data) +{ + struct nfp_repr *repr = netdev_priv(port->netdev); + struct nfp_abm_link *alink; + unsigned int i; + + if (port->type != NFP_PORT_PF_PORT) + return data; + alink = repr->app_priv; + for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) { + data = nfp_pr_et(data, "q%u_no_wait", i); + data = nfp_pr_et(data, "q%u_delayed", i); + } + return data; +} + +static int nfp_abm_init(struct nfp_app *app) +{ + struct nfp_pf *pf = app->pf; + struct nfp_reprs *reprs; + struct nfp_abm *abm; + int err; + + if (!pf->eth_tbl) { + nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); + return -EINVAL; + } + if (pf->max_data_vnics != pf->eth_tbl->count) { + nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", + pf->max_data_vnics, pf->eth_tbl->count); + return -EINVAL; + } + if (!pf->mac_stats_bar) { + nfp_warn(app->cpp, "ABM NIC requires mac_stats symbol\n"); + return -EINVAL; + } + + abm = kzalloc(sizeof(*abm), GFP_KERNEL); + if (!abm) + return -ENOMEM; + app->priv = abm; + abm->app = app; + + err = nfp_abm_ctrl_find_addrs(abm); + if (err) + goto err_free_abm; + + /* We start in legacy mode, make sure advanced queuing is disabled */ + err = 
nfp_abm_ctrl_qm_disable(abm); + if (err) + goto err_free_abm; + + err = -ENOMEM; + reprs = nfp_reprs_alloc(pf->max_data_vnics); + if (!reprs) + goto err_free_abm; + RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs); + + reprs = nfp_reprs_alloc(pf->max_data_vnics); + if (!reprs) + goto err_free_phys; + RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PF], reprs); + + return 0; + +err_free_phys: + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); +err_free_abm: + kfree(abm); + app->priv = NULL; + return err; +} + +static void nfp_abm_clean(struct nfp_app *app) +{ + struct nfp_abm *abm = app->priv; + + nfp_abm_eswitch_clean_up(abm); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); + nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); + kfree(abm); + app->priv = NULL; +} + +const struct nfp_app_type app_abm = { + .id = NFP_APP_ACTIVE_BUFFER_MGMT_NIC, + .name = "abm", + + .init = nfp_abm_init, + .clean = nfp_abm_clean, + + .vnic_alloc = nfp_abm_vnic_alloc, + .vnic_free = nfp_abm_vnic_free, + + .port_get_stats = nfp_abm_port_get_stats, + .port_get_stats_count = nfp_abm_port_get_stats_count, + .port_get_stats_strings = nfp_abm_port_get_stats_strings, + + .setup_tc = nfp_abm_setup_tc, + + .eswitch_mode_get = nfp_abm_eswitch_mode_get, + .eswitch_mode_set = nfp_abm_eswitch_mode_set, + + .repr_get = nfp_abm_repr_get, +}; diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h new file mode 100644 index 000000000000..934a70835473 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
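nfp_abm_update_stats() above reports deltas, not absolutes: each refresh reads the hardware counters, adds only the difference since the previous read to the qdisc's accumulators, and the caller then stores the fresh snapshot. A self-contained sketch of that pattern; the struct shapes are simplified stand-ins for nfp_alink_stats and tc_qopt_offload_stats, and the snapshot store is folded into the helper here:

    #include <assert.h>
    #include <stdint.h>

    struct alink_stats {   /* shape of nfp_alink_stats */
        uint64_t tx_pkts, tx_bytes, backlog_pkts, drops;
    };

    struct qstats_model {  /* simplified qdisc stats sink */
        uint64_t packets, bytes, qlen, drops;
    };

    /* Add only what changed since the previous snapshot, then keep the
     * new snapshot.  backlog is a gauge, so its delta may be
     * "negative"; unsigned wraparound keeps the running sum correct. */
    static void update_stats(const struct alink_stats *latest,
                             struct alink_stats *prev,
                             struct qstats_model *out)
    {
        out->packets += latest->tx_pkts - prev->tx_pkts;
        out->bytes += latest->tx_bytes - prev->tx_bytes;
        out->qlen += latest->backlog_pkts - prev->backlog_pkts;
        out->drops += latest->drops - prev->drops;
        *prev = *latest;
    }

    int main(void)
    {
        struct alink_stats prev = { 100, 9000, 5, 1 };
        struct alink_stats now = { 150, 14000, 3, 2 };
        struct qstats_model q = { 0 };

        update_stats(&now, &prev, &q);
        assert(q.packets == 50 && q.bytes == 5000 && q.drops == 1);
        return 0;
    }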
+ */ + +#ifndef __NFP_ABM_H__ +#define __NFP_ABM_H__ 1 + +#include <net/devlink.h> + +struct nfp_app; +struct nfp_net; + +#define NFP_ABM_PORTID_TYPE GENMASK(23, 16) +#define NFP_ABM_PORTID_ID GENMASK(7, 0) + +/** + * struct nfp_abm - ABM NIC app structure + * @app: back pointer to nfp_app + * @pf_id: ID of our PF link + * @eswitch_mode: devlink eswitch mode, advanced functions only visible + * in switchdev mode + * @q_lvls: queue level control area + * @qm_stats: queue statistics symbol + */ +struct nfp_abm { + struct nfp_app *app; + unsigned int pf_id; + enum devlink_eswitch_mode eswitch_mode; + const struct nfp_rtsym *q_lvls; + const struct nfp_rtsym *qm_stats; +}; + +/** + * struct nfp_alink_stats - ABM NIC statistics + * @tx_pkts: number of TXed packets + * @tx_bytes: number of TXed bytes + * @backlog_pkts: momentary backlog length (packets) + * @backlog_bytes: momentary backlog length (bytes) + * @overlimits: number of ECN marked TXed packets (accumulative) + * @drops: number of tail-dropped packets (accumulative) + */ +struct nfp_alink_stats { + u64 tx_pkts; + u64 tx_bytes; + u64 backlog_pkts; + u64 backlog_bytes; + u64 overlimits; + u64 drops; +}; + +/** + * struct nfp_alink_xstats - extended ABM NIC statistics + * @ecn_marked: number of ECN marked TXed packets + * @pdrop: number of hard drops due to queue limit + */ +struct nfp_alink_xstats { + u64 ecn_marked; + u64 pdrop; +}; + +/** + * struct nfp_red_qdisc - representation of single RED Qdisc + * @handle: handle of currently offloaded RED Qdisc + * @stats: statistics from last refresh + * @xstats: base of extended statistics + */ +struct nfp_red_qdisc { + u32 handle; + struct nfp_alink_stats stats; + struct nfp_alink_xstats xstats; +}; + +/** + * struct nfp_abm_link - port tuple of a ABM NIC + * @abm: back pointer to nfp_abm + * @vnic: data vNIC + * @id: id of the data vNIC + * @queue_base: id of base to host queue within PCIe (not QC idx) + * @total_queues: number of PF queues + * @parent: handle of expected parent, i.e. 
handle of MQ, or TC_H_ROOT + * @num_qdiscs: number of currently used qdiscs + * @qdiscs: array of qdiscs + */ +struct nfp_abm_link { + struct nfp_abm *abm; + struct nfp_net *vnic; + unsigned int id; + unsigned int queue_base; + unsigned int total_queues; + u32 parent; + unsigned int num_qdiscs; + struct nfp_red_qdisc *qdiscs; +}; + +void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); +int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); +int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, + u32 val); +int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, + struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_stats *stats); +int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, + struct nfp_alink_xstats *xstats); +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, + struct nfp_alink_xstats *xstats); +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i); +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i); +int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); +int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); +#endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 7e298148ca26..cb87fccb9f6a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -102,6 +102,15 @@ nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n) return nfp_bpf_cmsg_alloc(bpf, size); } +static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb) +{ + struct cmsg_hdr *hdr; + + hdr = (struct cmsg_hdr *)skb->data; + + return hdr->type; +} + static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb) { struct cmsg_hdr *hdr; @@ -431,6 +440,11 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb) goto err_free; } + if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) { + nfp_bpf_event_output(bpf, skb); + return; + } + nfp_ctrl_lock(bpf->app->ctrl); tag = nfp_bpf_cmsg_get_tag(skb); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h index 39639ac28b01..4c7972e3db63 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -37,11 +37,20 @@ #include <linux/bitops.h> #include <linux/types.h> +/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking + * our FW ABI. In that case we will do translation in the driver. 
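+ * (Such a translation would be a small switch applied before the
+ * value reaches the wire, e.g. kernel SCALAR_VALUE to
+ * NFP_BPF_SCALAR_VALUE, PTR_TO_STACK to NFP_BPF_STACK, and so on
+ * for the four types defined below.)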
+ */ +#define NFP_BPF_SCALAR_VALUE 1 +#define NFP_BPF_MAP_VALUE 4 +#define NFP_BPF_STACK 6 +#define NFP_BPF_PACKET_DATA 8 + enum bpf_cap_tlv_type { NFP_BPF_CAP_TYPE_FUNC = 1, NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2, NFP_BPF_CAP_TYPE_MAPS = 3, NFP_BPF_CAP_TYPE_RANDOM = 4, + NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, }; struct nfp_bpf_cap_tlv_func { @@ -81,6 +90,7 @@ enum nfp_bpf_cmsg_type { CMSG_TYPE_MAP_DELETE = 5, CMSG_TYPE_MAP_GETNEXT = 6, CMSG_TYPE_MAP_GETFIRST = 7, + CMSG_TYPE_BPF_EVENT = 8, __CMSG_TYPE_MAP_MAX, }; @@ -155,4 +165,13 @@ struct cmsg_reply_map_op { __be32 resv; struct cmsg_key_value_pair elem[0]; }; + +struct cmsg_bpf_event { + struct cmsg_hdr hdr; + __be32 cpu_id; + __be64 map_ptr; + __be32 data_size; + __be32 pkt_size; + u8 data[0]; +}; #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 29b4e5f8c102..8a92088df0d7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -42,6 +42,7 @@ #include "main.h" #include "../nfp_asm.h" +#include "../nfp_net_ctrl.h" /* --- NFP prog --- */ /* Foreach "multiple" entries macros provide pos and next<n> pointers. @@ -211,6 +212,60 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) } static void +__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer, + bool set, bool src_lmextn) +{ + u16 addr_lo, addr_hi; + u64 insn; + + addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO)); + addr_hi = addr != addr_lo; + + insn = OP_BR_BIT_BASE | + FIELD_PREP(OP_BR_BIT_A_SRC, areg) | + FIELD_PREP(OP_BR_BIT_B_SRC, breg) | + FIELD_PREP(OP_BR_BIT_BV, set) | + FIELD_PREP(OP_BR_BIT_DEFBR, defer) | + FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) | + FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) | + FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void +emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, + u8 defer, bool set, enum nfp_relo_type relo) +{ + struct nfp_insn_re_regs reg; + int err; + + /* NOTE: The bit to test is specified as an rotation amount, such that + * the bit to test will be placed on the MSB of the result when + * doing a rotate right. For bit X, we need right rotate X + 1. 
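+ * Example: to test bit 5, rotate right by 6; ror32(0x20, 6) is
+ * 0x80000000, i.e. the tested bit now sits in bit 31, where the
+ * branch-on-bit instruction samples it.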
+ */ + bit += 1; + + err = swreg_to_restricted(reg_none(), src, reg_imm(bit), ®, false); + if (err) { + nfp_prog->error = err; + return; + } + + __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set, + reg.src_lmextn); + + nfp_prog->prog[nfp_prog->prog_len - 1] |= + FIELD_PREP(OP_RELO_TYPE, relo); +} + +static void +emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer) +{ + emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL); +} + +static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, enum immed_shift shift, bool wr_both, @@ -309,6 +364,19 @@ emit_shf(struct nfp_prog *nfp_prog, swreg dst, } static void +emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst, + swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc) +{ + if (sc == SHF_SC_R_ROT) { + pr_err("indirect shift is not allowed on rotation\n"); + nfp_prog->error = -EFAULT; + return; + } + + emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0); +} + +static void __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, bool dst_lmextn, bool src_lmextn) @@ -1214,45 +1282,83 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } -static int -wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum br_mask br_mask, bool swap) +static const struct jmp_code_map { + enum br_mask br_mask; + bool swap; +} jmp_code_map[] = { + [BPF_JGT >> 4] = { BR_BLO, true }, + [BPF_JGE >> 4] = { BR_BHS, false }, + [BPF_JLT >> 4] = { BR_BLO, false }, + [BPF_JLE >> 4] = { BR_BHS, true }, + [BPF_JSGT >> 4] = { BR_BLT, true }, + [BPF_JSGE >> 4] = { BR_BGE, false }, + [BPF_JSLT >> 4] = { BR_BLT, false }, + [BPF_JSLE >> 4] = { BR_BGE, true }, +}; + +static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta) +{ + unsigned int op; + + op = BPF_OP(meta->insn.code) >> 4; + /* br_mask of 0 is BR_BEQ which we don't use in jump code table */ + if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) || + !jmp_code_map[op].br_mask, + "no code found for jump instruction")) + return NULL; + + return &jmp_code_map[op]; +} + +static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ + const struct jmp_code_map *code; + enum alu_op alu_op, carry_op; u8 reg = insn->dst_reg * 2; swreg tmp_reg; + code = nfp_jmp_code_get(meta); + if (!code) + return -EINVAL; + + alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB; + carry_op = meta->jump_neg_op ? 
ALU_OP_ADD_C : ALU_OP_SUB_C; + tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); - if (!swap) - emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg); + if (!code->swap) + emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg); else - emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg)); + emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); - if (!swap) + if (!code->swap) emit_alu(nfp_prog, reg_none(), - reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg); + reg_a(reg + 1), carry_op, tmp_reg); else emit_alu(nfp_prog, reg_none(), - tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1)); + tmp_reg, carry_op, reg_a(reg + 1)); - emit_br(nfp_prog, br_mask, insn->off, 0); + emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; } -static int -wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum br_mask br_mask, bool swap) +static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; + const struct jmp_code_map *code; u8 areg, breg; + code = nfp_jmp_code_get(meta); + if (!code) + return -EINVAL; + areg = insn->dst_reg * 2; breg = insn->src_reg * 2; - if (swap) { + if (code->swap) { areg ^= breg; breg ^= areg; areg ^= breg; @@ -1261,7 +1367,7 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); emit_alu(nfp_prog, reg_none(), reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); - emit_br(nfp_prog, br_mask, insn->off, 0); + emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; } @@ -1357,15 +1463,9 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - struct bpf_offloaded_map *offmap; - struct nfp_bpf_map *nfp_map; bool load_lm_ptr; u32 ret_tgt; s64 lm_off; - swreg tid; - - offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr; - nfp_map = offmap->dev_priv; /* We only have to reload LM0 if the key is not at start of stack */ lm_off = nfp_prog->stack_depth; @@ -1378,17 +1478,12 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (meta->func_id == BPF_FUNC_map_update_elem) emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2); - /* Load map ID into a register, it should actually fit as an immediate - * but in case it doesn't deal with it here, not in the delay slots. 
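cmp_imm() and cmp_reg() above reduce all eight BPF compare jumps to one subtract-with-borrow sequence plus an optional operand swap from jmp_code_map (JGT a,b is emitted as the swapped test "b lower-than a"). A userspace model of the 64-bit unsigned compare built from 32-bit halves, the shape of the ALU_OP_SUB / ALU_OP_SUB_C pair; names and demo values are illustrative:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* 64-bit unsigned "a < b" from two 32-bit subtracts, the second
     * consuming the first one's borrow; the branch then tests only the
     * final borrow (BR_BLO). */
    static bool ult64(uint32_t a_lo, uint32_t a_hi,
                      uint32_t b_lo, uint32_t b_hi)
    {
        unsigned int borrow = a_lo < b_lo; /* borrow out of the low SUB */

        return (uint64_t)a_hi < (uint64_t)b_hi + borrow;
    }

    int main(void)
    {
        /* 0xffffffff < 2^32: the swapped form of JGT 2^32, 0xffffffff */
        assert(ult64(0xffffffff, 0x0, 0x0, 0x1));
        return 0;
    }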
- */ - tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog)); - emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, 2, RELO_BR_HELPER); ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; /* Load map ID into A0 */ - wrp_mov(nfp_prog, reg_a(0), tid); + wrp_mov(nfp_prog, reg_a(0), reg_a(2)); /* Load the return address into B0 */ wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); @@ -1400,7 +1495,7 @@ map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) if (!load_lm_ptr) return 0; - emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); + emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0); wrp_nops(nfp_prog, 3); return 0; @@ -1418,6 +1513,63 @@ nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int +nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + swreg ptr_type; + u32 ret_tgt; + + ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog)); + + ret_tgt = nfp_prog_current_offset(nfp_prog) + 3; + + emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id, + 2, RELO_BR_HELPER); + + /* Load ptr type into A1 */ + wrp_mov(nfp_prog, reg_a(1), ptr_type); + + /* Load the return address into B0 */ + wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL); + + if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt)) + return -EINVAL; + + return 0; +} + +static int +nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u32 jmp_tgt; + + jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5; + + /* Make sure the queue id fits into FW field */ + emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2), + ALU_OP_AND_NOT_B, reg_imm(0xff)); + emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2); + + /* Set the 'queue selected' bit and the queue value */ + emit_shf(nfp_prog, pv_qsel_set(nfp_prog), + pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1), + SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT); + emit_ld_field(nfp_prog, + pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2), + SHF_SC_NONE, 0); + /* Delay slots end here, we will jump over next instruction if queue + * value fits into the field. + */ + emit_ld_field(nfp_prog, + pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX), + SHF_SC_NONE, 0); + + if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt)) + return -EINVAL; + + return 0; +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -1544,26 +1696,142 @@ static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +/* Pseudo code: + * if shift_amt >= 32 + * dst_high = dst_low << shift_amt[4:0] + * dst_low = 0; + * else + * dst_high = (dst_high, dst_low) >> (32 - shift_amt) + * dst_low = dst_low << shift_amt + * + * The indirect shift will use the same logic at runtime. 
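+ *
+ * Example: shift_amt = 40 takes the first branch: shift_amt[4:0] is 8,
+ * so dst_high becomes dst_low << 8 and dst_low becomes 0, which is
+ * exactly (dst << 40) computed in 32-bit halves.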
+ */ +static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt < 32) { + emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), + SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, + 32 - shift_amt); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF, shift_amt); + } else if (shift_amt == 32) { + wrp_reg_mov(nfp_prog, dst + 1, dst); + wrp_immed(nfp_prog, reg_both(dst), 0); + } else if (shift_amt > 32) { + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF, shift_amt - 32); + wrp_immed(nfp_prog, reg_both(dst), 0); + } + + return 0; +} + static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - if (insn->imm < 32) { - emit_shf(nfp_prog, reg_both(dst + 1), - reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), - SHF_SC_R_DSHF, 32 - insn->imm); - emit_shf(nfp_prog, reg_both(dst), - reg_none(), SHF_OP_NONE, reg_b(dst), - SHF_SC_L_SHF, insn->imm); - } else if (insn->imm == 32) { - wrp_reg_mov(nfp_prog, dst + 1, dst); - wrp_immed(nfp_prog, reg_both(dst), 0); - } else if (insn->imm > 32) { - emit_shf(nfp_prog, reg_both(dst + 1), - reg_none(), SHF_OP_NONE, reg_b(dst), - SHF_SC_L_SHF, insn->imm - 32); - wrp_immed(nfp_prog, reg_both(dst), 0); + return __shl_imm64(nfp_prog, dst, insn->imm); +} + +static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB, + reg_b(src)); + emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF); +} + +/* NOTE: for indirect left shift, HIGH part should be calculated first. */ +static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF); +} + +static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + shl_reg64_lt32_high(nfp_prog, dst, src); + shl_reg64_lt32_low(nfp_prog, dst, src); +} + +static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF); + wrp_immed(nfp_prog, reg_both(dst), 0); +} + +static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __shl_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + shl_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + shl_reg64_ge32(nfp_prog, dst, src); + } else { + /* Generate different instruction sequences depending on runtime + * value of shift amount. + */ + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 7; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + + shl_reg64_lt32_high(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* shl_reg64_lt32_low packed in delay slot. 
*/ + shl_reg64_lt32_low(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + shl_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; + } + + return 0; +} + +/* Pseudo code: + * if shift_amt >= 32 + * dst_high = 0; + * dst_low = dst_high >> shift_amt[4:0] + * else + * dst_high = dst_high >> shift_amt + * dst_low = (dst_high, dst_low) >> shift_amt + * + * The indirect shift will use the same logic at runtime. + */ +static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt < 32) { + emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF, shift_amt); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); + } else if (shift_amt == 32) { + wrp_reg_mov(nfp_prog, dst, dst + 1); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + } else if (shift_amt > 32) { + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); } return 0; @@ -1574,21 +1842,186 @@ static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) const struct bpf_insn *insn = &meta->insn; u8 dst = insn->dst_reg * 2; - if (insn->imm < 32) { - emit_shf(nfp_prog, reg_both(dst), - reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), - SHF_SC_R_DSHF, insn->imm); - emit_shf(nfp_prog, reg_both(dst + 1), - reg_none(), SHF_OP_NONE, reg_b(dst + 1), - SHF_SC_R_SHF, insn->imm); - } else if (insn->imm == 32) { + return __shr_imm64(nfp_prog, dst, insn->imm); +} + +/* NOTE: for indirect right shift, LOW part should be calculated first. */ +static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF); +} + +static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF); +} + +static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + shr_reg64_lt32_low(nfp_prog, dst, src); + shr_reg64_lt32_high(nfp_prog, dst, src); +} + +static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst + 1), SHF_SC_R_SHF); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); +} + +static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __shr_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + shr_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + shr_reg64_ge32(nfp_prog, dst, src); + } else { + /* Generate different instruction sequences depending on runtime + * value of shift amount. 
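+ * Bit 5 of the shift amount decides which: emit_br_bset() branches to
+ * the ge32 sequence when the amount is 32 or more, otherwise the lt32
+ * pair runs, with its second half carried in the branch delay slots.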
+ */ + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + shr_reg64_lt32_low(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* shr_reg64_lt32_high packed in delay slot. */ + shr_reg64_lt32_high(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + shr_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; + } + + return 0; +} + +/* Code logic is the same as __shr_imm64 except ashr requires signedness bit + * told through PREV_ALU result. + */ +static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt < 32) { + emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_DSHF, shift_amt); + /* Set signedness bit. */ + emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, + reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt); + } else if (shift_amt == 32) { + /* NOTE: this also helps setting signedness bit. */ wrp_reg_mov(nfp_prog, dst, dst + 1); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); - } else if (insn->imm > 32) { - emit_shf(nfp_prog, reg_both(dst), - reg_none(), SHF_OP_NONE, reg_b(dst + 1), - SHF_SC_R_SHF, insn->imm - 32); - wrp_immed(nfp_prog, reg_both(dst + 1), 0); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); + } else if (shift_amt > 32) { + emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR, + reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); + } + + return 0; +} + +static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + return __ashr_imm64(nfp_prog, dst, insn->imm); +} + +static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + /* NOTE: the first insn will set both indirect shift amount (source A) + * and signedness bit (MSB of result). + */ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); + emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF); +} + +static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + /* NOTE: it is the same as logic shift because we don't need to shift in + * signedness bit when the shift amount is less than 32. + */ + return shr_reg64_lt32_low(nfp_prog, dst, src); +} + +static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + ashr_reg64_lt32_low(nfp_prog, dst, src); + ashr_reg64_lt32_high(nfp_prog, dst, src); +} + +static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src) +{ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF); + emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR, + reg_b(dst + 1), SHF_SC_R_SHF, 31); +} + +/* Like ashr_imm64, but need to use indirect shift. 
*/ +static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin; + umax = meta->umax; + if (umin == umax) + return __ashr_imm64(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + if (umax < 32) { + ashr_reg64_lt32(nfp_prog, dst, src); + } else if (umin >= 32) { + ashr_reg64_ge32(nfp_prog, dst, src); + } else { + u16 label_ge32, label_end; + + label_ge32 = nfp_prog_current_offset(nfp_prog) + 6; + emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0); + ashr_reg64_lt32_low(nfp_prog, dst, src); + label_end = nfp_prog_current_offset(nfp_prog) + 6; + emit_br(nfp_prog, BR_UNC, label_end, 2); + /* ashr_reg64_lt32_high packed in delay slot. */ + ashr_reg64_lt32_high(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32)) + return -EINVAL; + ashr_reg64_ge32(nfp_prog, dst, src); + + if (!nfp_prog_confirm_current_offset(nfp_prog, label_end)) + return -EINVAL; } return 0; @@ -2108,6 +2541,17 @@ mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, false, wrp_lmem_store); } +static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + switch (meta->insn.off) { + case offsetof(struct xdp_md, rx_queue_index): + return nfp_queue_select(nfp_prog, meta); + } + + WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */ + return -EOPNOTSUPP; +} + static int mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) @@ -2134,6 +2578,9 @@ static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { + if (meta->ptr.type == PTR_TO_CTX) + if (nfp_prog->type == BPF_PROG_TYPE_XDP) + return mem_stx_xdp(nfp_prog, meta); return mem_stx(nfp_prog, meta, 4); } @@ -2283,46 +2730,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } -static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); -} - -static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); -} - -static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false); -} - -static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); -} - -static int jsgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLT, true); -} - -static int jsge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BGE, false); -} - -static int jslt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BLT, false); -} - -static int jsle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_imm(nfp_prog, meta, BR_BGE, true); -} - static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -2392,46 +2799,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } -static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); -} - -static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return 
wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); -} - -static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); -} - -static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); -} - -static int jsgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLT, true); -} - -static int jsge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BGE, false); -} - -static int jslt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BLT, false); -} - -static int jsle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -{ - return wrp_cmp_reg(nfp_prog, meta, BR_BGE, true); -} - static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -2453,6 +2820,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return map_call_stack_common(nfp_prog, meta); case BPF_FUNC_get_prandom_u32: return nfp_get_prandom_u32(nfp_prog, meta); + case BPF_FUNC_perf_event_output: + return nfp_perf_event_output(nfp_prog, meta); default: WARN_ONCE(1, "verifier allowed unsupported function\n"); return -EOPNOTSUPP; @@ -2480,8 +2849,12 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, [BPF_ALU64 | BPF_NEG] = neg_reg64, + [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, + [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64, [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, + [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64, + [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64, [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, [BPF_ALU | BPF_MOV | BPF_K] = mov_imm, [BPF_ALU | BPF_XOR | BPF_X] = xor_reg, @@ -2520,25 +2893,25 @@ static const instr_cb_t instr_cb[256] = { [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, [BPF_JMP | BPF_JA | BPF_K] = jump, [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, - [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, - [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, - [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, - [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, - [BPF_JMP | BPF_JSGT | BPF_K] = jsgt_imm, - [BPF_JMP | BPF_JSGE | BPF_K] = jsge_imm, - [BPF_JMP | BPF_JSLT | BPF_K] = jslt_imm, - [BPF_JMP | BPF_JSLE | BPF_K] = jsle_imm, + [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm, + [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, - [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, - [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, - [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, - [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, - [BPF_JMP | BPF_JSGT | BPF_X] = jsgt_reg, - [BPF_JMP | BPF_JSGE | BPF_X] = jsge_reg, - [BPF_JMP | BPF_JSLT | BPF_X] = jslt_reg, - [BPF_JMP | BPF_JSLE | BPF_X] = jsle_reg, + [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg, + [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg, + 
[BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, @@ -2777,6 +3150,54 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) } } +/* abs(insn.imm) will fit better into unrestricted reg immediate - + * convert add/sub of a negative number into a sub/add of a positive one. + */ +static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta; + + list_for_each_entry(meta, &nfp_prog->insns, l) { + struct bpf_insn insn = meta->insn; + + if (meta->skip) + continue; + + if (BPF_CLASS(insn.code) != BPF_ALU && + BPF_CLASS(insn.code) != BPF_ALU64 && + BPF_CLASS(insn.code) != BPF_JMP) + continue; + if (BPF_SRC(insn.code) != BPF_K) + continue; + if (insn.imm >= 0) + continue; + + if (BPF_CLASS(insn.code) == BPF_JMP) { + switch (BPF_OP(insn.code)) { + case BPF_JGE: + case BPF_JSGE: + case BPF_JLT: + case BPF_JSLT: + meta->jump_neg_op = true; + break; + default: + continue; + } + } else { + if (BPF_OP(insn.code) == BPF_ADD) + insn.code = BPF_CLASS(insn.code) | BPF_SUB; + else if (BPF_OP(insn.code) == BPF_SUB) + insn.code = BPF_CLASS(insn.code) | BPF_ADD; + else + continue; + + meta->insn.code = insn.code | BPF_K; + } + + meta->insn.imm = -insn.imm; + } +} + /* Remove masking after load since our load guarantees this is not needed */ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) { @@ -3212,6 +3633,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) { nfp_bpf_opt_reg_init(nfp_prog); + nfp_bpf_opt_neg_add_sub(nfp_prog); nfp_bpf_opt_ld_mask(nfp_prog); nfp_bpf_opt_ld_shift(nfp_prog); nfp_bpf_opt_ldst_gather(nfp_prog); @@ -3220,6 +3642,33 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) return 0; } +static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta1, *meta2; + struct nfp_bpf_map *nfp_map; + struct bpf_map *map; + + nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { + if (meta1->skip || meta2->skip) + continue; + + if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || + meta1->insn.src_reg != BPF_PSEUDO_MAP_FD) + continue; + + map = (void *)(unsigned long)((u32)meta1->insn.imm | + (u64)meta2->insn.imm << 32); + if (bpf_map_offload_neutral(map)) + continue; + nfp_map = map_to_offmap(map)->dev_priv; + + meta1->insn.imm = nfp_map->tid; + meta2->insn.imm = 0; + } + + return 0; +} + static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len) { __le64 *ustore = (__force __le64 *)prog; @@ -3256,6 +3705,10 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog) { int ret; + ret = nfp_bpf_replace_map_ptrs(nfp_prog); + if (ret) + return ret; + ret = nfp_bpf_optimize(nfp_prog); if (ret) return ret; @@ -3346,6 +3799,9 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv) case BPF_FUNC_map_delete_elem: val = nfp_prog->bpf->helpers.map_delete; break; + case BPF_FUNC_perf_event_output: + val = nfp_prog->bpf->helpers.perf_event_output; + break; default: pr_err("relocation of unknown helper %d\n", val); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 35fb31f682af..fcdfb8e7fdea 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -43,6 +43,14 @@ #include "fw.h" #include "main.h" +const struct rhashtable_params nfp_bpf_maps_neutral_params = { + .nelem_hint = 4, + .key_len = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr), + .key_offset = offsetof(struct nfp_bpf_neutral_map, ptr), + .head_offset = offsetof(struct nfp_bpf_neutral_map, l), + .automatic_shrinking = true, +}; + static bool nfp_net_ebpf_capable(struct nfp_net *nn) { #ifdef __LITTLE_ENDIAN @@ -290,6 +298,9 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) case BPF_FUNC_map_delete_elem: bpf->helpers.map_delete = readl(&cap->func_addr); break; + case BPF_FUNC_perf_event_output: + bpf->helpers.perf_event_output = readl(&cap->func_addr); + break; } return 0; @@ -323,6 +334,13 @@ nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value, return 0; } +static int +nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length) +{ + bpf->queue_select = true; + return 0; +} + static int nfp_bpf_parse_capabilities(struct nfp_app *app) { struct nfp_cpp *cpp = app->pf->cpp; @@ -365,6 +383,10 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) if (nfp_bpf_parse_cap_random(app->priv, value, length)) goto err_release_free; break; + case NFP_BPF_CAP_TYPE_QUEUE_SELECT: + if (nfp_bpf_parse_cap_qsel(app->priv, value, length)) + goto err_release_free; + break; default: nfp_dbg(cpp, "unknown BPF capability: %d\n", type); break; @@ -401,17 +423,28 @@ static int nfp_bpf_init(struct nfp_app *app) init_waitqueue_head(&bpf->cmsg_wq); INIT_LIST_HEAD(&bpf->map_list); - err = nfp_bpf_parse_capabilities(app); + err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params); if (err) goto err_free_bpf; + err = nfp_bpf_parse_capabilities(app); + if (err) + goto err_free_neutral_maps; + return 0; +err_free_neutral_maps: + rhashtable_destroy(&bpf->maps_neutral); err_free_bpf: kfree(bpf); return err; } +static void nfp_check_rhashtable_empty(void *ptr, void *arg) +{ + WARN_ON_ONCE(1); +} + static void nfp_bpf_clean(struct nfp_app *app) { struct nfp_app_bpf *bpf = app->priv; @@ -419,6 +452,8 @@ static void nfp_bpf_clean(struct nfp_app *app) WARN_ON(!skb_queue_empty(&bpf->cmsg_replies)); WARN_ON(!list_empty(&bpf->map_list)); WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use); + rhashtable_free_and_destroy(&bpf->maps_neutral, + nfp_check_rhashtable_empty, NULL); kfree(bpf); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 4981c8944ca3..654fe7823e5e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -39,6 +39,7 @@ #include <linux/bpf_verifier.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/rhashtable.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/wait.h> @@ -81,10 +82,16 @@ enum static_regs { enum pkt_vec { PKT_VEC_PKT_LEN = 0, PKT_VEC_PKT_PTR = 2, + PKT_VEC_QSEL_SET = 4, + PKT_VEC_QSEL_VAL = 6, }; +#define PKT_VEL_QSEL_SET_BIT 4 + #define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) #define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) +#define pv_qsel_set(np) reg_lm(1, PKT_VEC_QSEL_SET) +#define pv_qsel_val(np) reg_lm(1, PKT_VEC_QSEL_VAL) #define stack_reg(np) reg_a(STATIC_REG_STACK) #define stack_imm(np) imm_b(np) @@ -114,6 +121,8 @@ enum pkt_vec { * @maps_in_use: number of currently offloaded maps * @map_elems_in_use: number of elements allocated to offloaded maps * + * @maps_neutral: hash table of offload-neutral maps (on pointer) + * * @adjust_head: adjust head capability * @adjust_head.flags: extra flags for adjust head * @adjust_head.off_min: minimal packet offset within buffer required @@ -133,8 +142,10 @@ enum pkt_vec { * @helpers.map_lookup: map lookup helper address * @helpers.map_update: map update helper address * @helpers.map_delete: map delete helper address + * @helpers.perf_event_output: output perf event to a ring buffer * * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) + * @queue_select: BPF can set the RX queue ID in packet vector */ struct nfp_app_bpf { struct nfp_app *app; @@ -150,6 +161,8 @@ struct nfp_app_bpf { unsigned int maps_in_use; unsigned int map_elems_in_use; + struct rhashtable maps_neutral; + struct nfp_bpf_cap_adjust_head { u32 flags; int off_min; @@ -171,9 +184,11 @@ struct nfp_app_bpf { u32 map_lookup; u32 map_update; u32 map_delete; + u32 perf_event_output; } helpers; bool pseudo_random; + bool queue_select; }; enum nfp_bpf_map_use { @@ -199,6 +214,14 @@ struct nfp_bpf_map { enum nfp_bpf_map_use use_map[]; }; +struct nfp_bpf_neutral_map { + struct rhash_head l; + struct bpf_map *ptr; + u32 count; +}; + +extern const struct rhashtable_params nfp_bpf_maps_neutral_params; + struct nfp_prog; struct nfp_insn_meta; typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); @@ -236,9 +259,12 @@ struct nfp_bpf_reg_state { * @xadd_over_16bit: 16bit immediate is not guaranteed * @xadd_maybe_16bit: 16bit immediate is possible * @jmp_dst: destination info for jump instructions + * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB * @func_id: function id for call instructions * @arg1: arg1 for call instructions * @arg2: arg2 for call instructions + * @umin: copy of core verifier umin_value. + * @umax: copy of core verifier umax_value. * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags @@ -264,13 +290,23 @@ struct nfp_insn_meta { bool xadd_maybe_16bit; }; /* jump */ - struct nfp_insn_meta *jmp_dst; + struct { + struct nfp_insn_meta *jmp_dst; + bool jump_neg_op; + }; /* function calls */ struct { u32 func_id; struct bpf_reg_state arg1; struct nfp_bpf_reg_state arg2; }; + /* We are interested in range info for some operands, + * for example, the shift amount. 
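+	 * As a sketch of their life cycle: umin is seeded to U64_MAX in
+	 * nfp_prog_prepare(), both are then narrowed with the verifier's
+	 * umin_value/umax_value in nfp_verify_insn(), and a shift where
+	 * umin == umax falls back to the cheaper immediate encoding in
+	 * the JIT.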
+ */ + struct { + u64 umin; + u64 umax; + }; }; unsigned int off; unsigned short n; @@ -348,6 +384,25 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD); } +static inline bool is_mbpf_indir_shift(const struct nfp_insn_meta *meta) +{ + u8 code = meta->insn.code; + bool is_alu, is_shift; + u8 opclass, opcode; + + opclass = BPF_CLASS(code); + is_alu = opclass == BPF_ALU64 || opclass == BPF_ALU; + if (!is_alu) + return false; + + opcode = BPF_OP(code); + is_shift = opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH; + if (!is_shift) + return false; + + return BPF_SRC(code) == BPF_X; +} + /** * struct nfp_prog - nfp BPF program * @bpf: backpointer to the bpf app priv structure @@ -363,6 +418,8 @@ static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta) * @error: error code if something went wrong * @stack_depth: max stack depth from the verifier * @adjust_head_location: if program has single adjust head call - the insn no. + * @map_records_cnt: the number of map pointers recorded for this prog + * @map_records: the map record pointers from bpf->maps_neutral * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -386,6 +443,9 @@ struct nfp_prog { unsigned int stack_depth; unsigned int adjust_head_location; + unsigned int map_records_cnt; + struct nfp_bpf_neutral_map **map_records; + struct list_head insns; }; @@ -436,5 +496,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap, int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap, void *key, void *next_key); +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb); + void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 42d98792bd25..7eae4c0266f8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -57,6 +57,126 @@ #include "../nfp_net.h" static int +nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, + struct bpf_map *map) +{ + struct nfp_bpf_neutral_map *record; + int err; + + /* Map record paths are entered via ndo, update side is protected. */ + ASSERT_RTNL(); + + /* Reuse path - other offloaded program is already tracking this map. */ + record = rhashtable_lookup_fast(&bpf->maps_neutral, &map, + nfp_bpf_maps_neutral_params); + if (record) { + nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; + record->count++; + return 0; + } + + /* Grab a single ref to the map for our record. The prog destroy ndo + * happens after free_used_maps(). 
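+	 * Put differently: by the time nfp_map_ptrs_forget() drops this
+	 * reference the program's own map references are already gone, so
+	 * this is what keeps the pointer keyed in maps_neutral valid for
+	 * any still-arriving perf events.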
+ */ + map = bpf_map_inc(map, false); + if (IS_ERR(map)) + return PTR_ERR(map); + + record = kmalloc(sizeof(*record), GFP_KERNEL); + if (!record) { + err = -ENOMEM; + goto err_map_put; + } + + record->ptr = map; + record->count = 1; + + err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l, + nfp_bpf_maps_neutral_params); + if (err) + goto err_free_rec; + + nfp_prog->map_records[nfp_prog->map_records_cnt++] = record; + + return 0; + +err_free_rec: + kfree(record); +err_map_put: + bpf_map_put(map); + return err; +} + +static void +nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog) +{ + bool freed = false; + int i; + + ASSERT_RTNL(); + + for (i = 0; i < nfp_prog->map_records_cnt; i++) { + if (--nfp_prog->map_records[i]->count) { + nfp_prog->map_records[i] = NULL; + continue; + } + + WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral, + &nfp_prog->map_records[i]->l, + nfp_bpf_maps_neutral_params)); + freed = true; + } + + if (freed) { + synchronize_rcu(); + + for (i = 0; i < nfp_prog->map_records_cnt; i++) + if (nfp_prog->map_records[i]) { + bpf_map_put(nfp_prog->map_records[i]->ptr); + kfree(nfp_prog->map_records[i]); + } + } + + kfree(nfp_prog->map_records); + nfp_prog->map_records = NULL; + nfp_prog->map_records_cnt = 0; +} + +static int +nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, + struct bpf_prog *prog) +{ + int i, cnt, err; + + /* Quickly count the maps we will have to remember */ + cnt = 0; + for (i = 0; i < prog->aux->used_map_cnt; i++) + if (bpf_map_offload_neutral(prog->aux->used_maps[i])) + cnt++; + if (!cnt) + return 0; + + nfp_prog->map_records = kmalloc_array(cnt, + sizeof(nfp_prog->map_records[0]), + GFP_KERNEL); + if (!nfp_prog->map_records) + return -ENOMEM; + + for (i = 0; i < prog->aux->used_map_cnt; i++) + if (bpf_map_offload_neutral(prog->aux->used_maps[i])) { + err = nfp_map_ptr_record(bpf, nfp_prog, + prog->aux->used_maps[i]); + if (err) { + nfp_map_ptrs_forget(bpf, nfp_prog); + return err; + } + } + WARN_ON(cnt != nfp_prog->map_records_cnt); + + return 0; +} + +static int nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, unsigned int cnt) { @@ -70,6 +190,8 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, meta->insn = prog[i]; meta->n = i; + if (is_mbpf_indir_shift(meta)) + meta->umin = U64_MAX; list_add_tail(&meta->l, &nfp_prog->insns); } @@ -151,7 +273,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64); prog->aux->offload->jited_image = nfp_prog->prog; - return 0; + return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog); } static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) @@ -159,6 +281,7 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; kvfree(nfp_prog->prog); + nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog); nfp_prog_free(nfp_prog); return 0; @@ -320,6 +443,53 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) } } +static unsigned long +nfp_bpf_perf_event_copy(void *dst, const void *src, + unsigned long off, unsigned long len) +{ + memcpy(dst, src + off, len); + return 0; +} + +int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb) +{ + struct cmsg_bpf_event *cbe = (void *)skb->data; + u32 pkt_size, data_size; + struct bpf_map *map; + + if (skb->len < sizeof(struct cmsg_bpf_event)) + goto err_drop; + + pkt_size = 
be32_to_cpu(cbe->pkt_size); + data_size = be32_to_cpu(cbe->data_size); + map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr); + + if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + goto err_drop; + if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION) + goto err_drop; + + rcu_read_lock(); + if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map, + nfp_bpf_maps_neutral_params)) { + rcu_read_unlock(); + pr_warn("perf event: dest map pointer %px not recognized, dropping event\n", + map); + goto err_drop; + } + + bpf_event_output(map, be32_to_cpu(cbe->cpu_id), + &cbe->data[round_up(pkt_size, 4)], data_size, + cbe->data, pkt_size, nfp_bpf_perf_event_copy); + rcu_read_unlock(); + + dev_consume_skb_any(skb); + return 0; +err_drop: + dev_kfree_skb_any(skb); + return -EINVAL; +} + static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 06ad53ce4ad9..4bfeba7b21b2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2017 Netronome Systems, Inc. + * Copyright (C) 2016-2018 Netronome Systems, Inc. * * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -36,6 +36,8 @@ #include <linux/kernel.h> #include <linux/pkt_cls.h> +#include "../nfp_app.h" +#include "../nfp_main.h" #include "fw.h" #include "main.h" @@ -149,15 +151,6 @@ nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env, return false; } - /* Rest of the checks is only if we re-parse the same insn */ - if (!meta->func_id) - return true; - - if (meta->arg1.map_ptr != reg1->map_ptr) { - pr_vlog(env, "%s: called for different map\n", fname); - return false; - } - return true; } @@ -216,6 +209,71 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n"); return -EOPNOTSUPP; + case BPF_FUNC_perf_event_output: + BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE || + NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE || + NFP_BPF_STACK != PTR_TO_STACK || + NFP_BPF_PACKET_DATA != PTR_TO_PACKET); + + if (!bpf->helpers.perf_event_output) { + pr_vlog(env, "event_output: not supported by FW\n"); + return -EOPNOTSUPP; + } + + /* Force current CPU to make sure we can report the event + * wherever we get the control message from FW. + */ + if (reg3->var_off.mask & BPF_F_INDEX_MASK || + (reg3->var_off.value & BPF_F_INDEX_MASK) != + BPF_F_CURRENT_CPU) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off); + pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n", + tn_buf); + return -EOPNOTSUPP; + } + + /* Save space in meta, we don't care about arguments other + * than 4th meta, shove it into arg1. 
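+		 * (That is, reg1 below is simply re-pointed at R4, the data
+		 * argument of bpf_perf_event_output(), and the type seen
+		 * there is checked against the one remembered in meta->arg1
+		 * on any later verifier pass.)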
+ */ + reg1 = cur_regs(env) + BPF_REG_4; + + if (reg1->type != SCALAR_VALUE /* NULL ptr */ && + reg1->type != PTR_TO_STACK && + reg1->type != PTR_TO_MAP_VALUE && + reg1->type != PTR_TO_PACKET) { + pr_vlog(env, "event_output: unsupported ptr type: %d\n", + reg1->type); + return -EOPNOTSUPP; + } + + if (reg1->type == PTR_TO_STACK && + !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL)) + return -EOPNOTSUPP; + + /* Warn user that on offload NFP may return success even if map + * is not going to accept the event, since the event output is + * fully async and device won't know the state of the map. + * There is also FW limitation on the event length. + * + * Lost events will not show up on the perf ring, driver + * won't see them at all. Events may also get reordered. + */ + dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev, + "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n"); + pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n"); + + if (!meta->func_id) + break; + + if (reg1->type != meta->arg1.type) { + pr_vlog(env, "event_output: ptr type changed: %d %d\n", + meta->arg1.type, reg1->type); + return -EINVAL; + } + break; + default: pr_vlog(env, "unsupported function id: %d\n", func_id); return -EOPNOTSUPP; @@ -410,6 +468,30 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } static int +nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + struct bpf_verifier_env *env) +{ + const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg; + + if (reg->type == PTR_TO_CTX) { + if (nfp_prog->type == BPF_PROG_TYPE_XDP) { + /* XDP ctx accesses must be 4B in size */ + switch (meta->insn.off) { + case offsetof(struct xdp_md, rx_queue_index): + if (nfp_prog->bpf->queue_select) + goto exit_check_ptr; + pr_vlog(env, "queue selection not supported by FW\n"); + return -EOPNOTSUPP; + } + } + pr_vlog(env, "unsupported store to context field\n"); + return -EOPNOTSUPP; + } +exit_check_ptr: + return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); +} + +static int nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, struct bpf_verifier_env *env) { @@ -464,11 +546,19 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.src_reg); if (is_mbpf_store(meta)) - return nfp_bpf_check_ptr(nfp_prog, meta, env, - meta->insn.dst_reg); + return nfp_bpf_check_store(nfp_prog, meta, env); + if (is_mbpf_xadd(meta)) return nfp_bpf_check_xadd(nfp_prog, meta, env); + if (is_mbpf_indir_shift(meta)) { + const struct bpf_reg_state *sreg = + cur_regs(env) + meta->insn.src_reg; + + meta->umin = min(meta->umin, sreg->umin_value); + meta->umax = max(meta->umax, sreg->umax_value); + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 80df9a5d4217..4a6d2db75071 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -72,6 +72,42 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } +static int +nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, + struct nfp_fl_payload *nfp_flow, int act_len) +{ + size_t act_size = sizeof(struct nfp_fl_pre_lag); + struct nfp_fl_pre_lag *pre_lag; + struct net_device *out_dev; + int err; + + out_dev = 
tcf_mirred_dev(action);
+	if (!out_dev || !netif_is_lag_master(out_dev))
+		return 0;
+
+	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+		return -EOPNOTSUPP;
+
+	/* Pre_lag action must be first on action list.
+	 * If other actions already exist they need to be pushed forward.
+	 */
+	if (act_len)
+		memmove(nfp_flow->action_data + act_size,
+			nfp_flow->action_data, act_len);
+
+	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
+	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+	if (err)
+		return err;
+
+	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
+	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+	return act_size;
+}
+
 static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
 {
@@ -88,12 +124,13 @@ static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
 }
 
 static int
-nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
-	      struct nfp_fl_payload *nfp_flow, bool last,
-	      struct net_device *in_dev, enum nfp_flower_tun_type tun_type,
-	      int *tun_out_cnt)
+nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
+	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+	      bool last, struct net_device *in_dev,
+	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
 {
 	size_t act_size = sizeof(struct nfp_fl_output);
+	struct nfp_flower_priv *priv = app->priv;
 	struct net_device *out_dev;
 	u16 tmp_flags;
@@ -118,6 +155,15 @@ static int nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
 		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
 		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
+	} else if (netif_is_lag_master(out_dev) &&
+		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+		int gid;
+
+		output->flags = cpu_to_be16(tmp_flags);
+		gid = nfp_flower_lag_get_output_id(app, out_dev);
+		if (gid < 0)
+			return gid;
+		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
 	} else {
 		/* Set action output parameters. */
 		output->flags = cpu_to_be16(tmp_flags);
@@ -164,7 +210,7 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
 	struct nfp_fl_pre_tunnel *pre_tun_act;
 
 	/* Pre_tunnel action must be first on action list.
-	 * If other actions already exist they need pushed forward.
+	 * If other actions already exist they need to be pushed forward.
 	 */
 	if (act_len)
 		memmove(act_data + act_size, act_data, act_len);
@@ -443,42 +489,73 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
 }
 
 static int
-nfp_flower_loop_action(const struct tc_action *a,
+nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
+			 struct nfp_fl_payload *nfp_fl, int *a_len,
+			 struct net_device *netdev, bool last,
+			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
+			 int *out_cnt)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_output *output;
+	int err, prelag_size;
+
+	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
+		return -EOPNOTSUPP;
+
+	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
+	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
+			    tun_out_cnt);
+	if (err)
+		return err;
+
+	*a_len += sizeof(struct nfp_fl_output);
+
+	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+		/* nfp_fl_pre_lag returns -err or size of prelag action added.
+		 * This will be 0 if it is not egressing to a lag dev.
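+		 * A positive size with !last or with an earlier output is
+		 * rejected just below: e.g. a rule with two mirred actions
+		 * where the second egresses to a bond cannot be offloaded,
+		 * because the pre_lag action has to head the action list.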
+ */ + prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len); + if (prelag_size < 0) + return prelag_size; + else if (prelag_size > 0 && (!last || *out_cnt)) + return -EOPNOTSUPP; + + *a_len += prelag_size; + } + (*out_cnt)++; + + return 0; +} + +static int +nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, - enum nfp_flower_tun_type *tun_type, int *tun_out_cnt) + enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, + int *out_cnt) { struct nfp_fl_set_ipv4_udp_tun *set_tun; struct nfp_fl_pre_tunnel *pre_tun; struct nfp_fl_push_vlan *psh_v; struct nfp_fl_pop_vlan *pop_v; - struct nfp_fl_output *output; int err; if (is_tcf_gact_shot(a)) { nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); } else if (is_tcf_mirred_egress_redirect(a)) { - if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) - return -EOPNOTSUPP; - - output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type, - tun_out_cnt); + err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + true, tun_type, tun_out_cnt, + out_cnt); if (err) return err; - *a_len += sizeof(struct nfp_fl_output); } else if (is_tcf_mirred_egress_mirror(a)) { - if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) - return -EOPNOTSUPP; - - output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type, - tun_out_cnt); + err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + false, tun_type, tun_out_cnt, + out_cnt); if (err) return err; - *a_len += sizeof(struct nfp_fl_output); } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -535,11 +612,12 @@ nfp_flower_loop_action(const struct tc_action *a, return 0; } -int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_action(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { - int act_len, act_cnt, err, tun_out_cnt; + int act_len, act_cnt, err, tun_out_cnt, out_cnt; enum nfp_flower_tun_type tun_type; const struct tc_action *a; LIST_HEAD(actions); @@ -550,11 +628,12 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, act_len = 0; act_cnt = 0; tun_out_cnt = 0; + out_cnt = 0; tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev, - &tun_type, &tun_out_cnt); + err = nfp_flower_loop_action(app, a, nfp_flow, &act_len, netdev, + &tun_type, &tun_out_cnt, &out_cnt); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 577659f332e4..cb8565222621 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -239,8 +239,10 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) static void nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { + struct nfp_flower_priv *app_priv = app->priv; struct nfp_flower_cmsg_hdr *cmsg_hdr; enum nfp_flower_cmsg_type_port type; + bool skb_stored = false; cmsg_hdr = nfp_flower_cmsg_get_hdr(skb); @@ -258,13 +260,20 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: 
nfp_tunnel_keep_alive(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG: + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + skb_stored = nfp_flower_lag_unprocessed_msg(app, skb); + break; + } + /* fall through */ default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); goto out; } - dev_consume_skb_any(skb); + if (!skb_stored) + dev_consume_skb_any(skb); return; out: dev_kfree_skb_any(skb); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index bee4367a2c38..4a7f3510a296 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -92,6 +92,7 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 #define NFP_FL_ACTION_OPCODE_SET_UDP 14 #define NFP_FL_ACTION_OPCODE_SET_TCP 15 +#define NFP_FL_ACTION_OPCODE_PRE_LAG 16 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -103,6 +104,9 @@ #define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) +/* LAG ports */ +#define NFP_FL_LAG_OUT 0xC0DE0000 + /* Tunnel ports */ #define NFP_FL_PORT_TYPE_TUN 0x50000000 #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) @@ -177,6 +181,15 @@ struct nfp_fl_pop_vlan { __be16 reserved; }; +struct nfp_fl_pre_lag { + struct nfp_fl_act_head head; + __be16 group_id; + u8 lag_version[3]; + u8 instance; +}; + +#define NFP_FL_PRE_LAG_VER_OFF 8 + struct nfp_fl_pre_tunnel { struct nfp_fl_act_head head; __be16 reserved; @@ -366,6 +379,7 @@ struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4, NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c new file mode 100644 index 000000000000..0c4c957717ea --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -0,0 +1,726 @@ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "main.h" + +/* LAG group config flags. */ +#define NFP_FL_LAG_LAST BIT(1) +#define NFP_FL_LAG_FIRST BIT(2) +#define NFP_FL_LAG_DATA BIT(3) +#define NFP_FL_LAG_XON BIT(4) +#define NFP_FL_LAG_SYNC BIT(5) +#define NFP_FL_LAG_SWITCH BIT(6) +#define NFP_FL_LAG_RESET BIT(7) + +/* LAG port state flags. */ +#define NFP_PORT_LAG_LINK_UP BIT(0) +#define NFP_PORT_LAG_TX_ENABLED BIT(1) +#define NFP_PORT_LAG_CHANGED BIT(2) + +enum nfp_fl_lag_batch { + NFP_FL_LAG_BATCH_FIRST, + NFP_FL_LAG_BATCH_MEMBER, + NFP_FL_LAG_BATCH_FINISHED +}; + +/** + * struct nfp_flower_cmsg_lag_config - control message payload for LAG config + * @ctrl_flags: Configuration flags + * @reserved: Reserved for future use + * @ttl: Time to live of packet - host always sets to 0xff + * @pkt_number: Config message packet number - increment for each message + * @batch_ver: Batch version of messages - increment for each batch of messages + * @group_id: Group ID applicable + * @group_inst: Group instance number - increment when group is reused + * @members: Array of 32-bit words listing all active group members + */ +struct nfp_flower_cmsg_lag_config { + u8 ctrl_flags; + u8 reserved[2]; + u8 ttl; + __be32 pkt_number; + __be32 batch_ver; + __be32 group_id; + __be32 group_inst; + __be32 members[]; +}; + +/** + * struct nfp_fl_lag_group - list entry for each LAG group + * @group_id: Assigned group ID for host/kernel sync + * @group_inst: Group instance in case of ID reuse + * @list: List entry + * @master_ndev: Group master Netdev + * @dirty: Marked if the group needs synced to HW + * @offloaded: Marked if the group is currently offloaded to NIC + * @to_remove: Marked if the group should be removed from NIC + * @to_destroy: Marked if the group should be removed from driver + * @slave_cnt: Number of slaves in group + */ +struct nfp_fl_lag_group { + unsigned int group_id; + u8 group_inst; + struct list_head list; + struct net_device *master_ndev; + bool dirty; + bool offloaded; + bool to_remove; + bool to_destroy; + unsigned int slave_cnt; +}; + +#define NFP_FL_LAG_PKT_NUMBER_MASK GENMASK(30, 0) +#define NFP_FL_LAG_VERSION_MASK GENMASK(22, 0) +#define NFP_FL_LAG_HOST_TTL 0xff + +/* Use this ID with zero members to ack a batch config */ +#define NFP_FL_LAG_SYNC_ID 0 +#define NFP_FL_LAG_GROUP_MIN 1 /* ID 0 reserved */ +#define NFP_FL_LAG_GROUP_MAX 32 /* IDs 1 to 31 are valid */ + +/* wait for more config */ +#define NFP_FL_LAG_DELAY (msecs_to_jiffies(2)) + +#define NFP_FL_LAG_RETRANS_LIMIT 100 /* max retrans cmsgs to store */ + +static unsigned int nfp_fl_get_next_pkt_number(struct nfp_fl_lag *lag) +{ + lag->pkt_num++; + lag->pkt_num &= NFP_FL_LAG_PKT_NUMBER_MASK; + + return lag->pkt_num; +} + +static void nfp_fl_increment_version(struct nfp_fl_lag *lag) +{ + /* LSB is not considered by firmware so add 2 for each increment. */ + lag->batch_ver += 2; + lag->batch_ver &= NFP_FL_LAG_VERSION_MASK; + + /* Zero is reserved by firmware. 
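+	 * Worked example: batch_ver advances 2, 4, ... up to 0x7ffffe;
+	 * the next bump wraps through the 23-bit mask to 0, which the
+	 * check below turns into 2, so version 0 is never sent to
+	 * firmware.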
*/ + if (!lag->batch_ver) + lag->batch_ver += 2; +} + +static struct nfp_fl_lag_group * +nfp_fl_lag_group_create(struct nfp_fl_lag *lag, struct net_device *master) +{ + struct nfp_fl_lag_group *group; + struct nfp_flower_priv *priv; + int id; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + id = ida_simple_get(&lag->ida_handle, NFP_FL_LAG_GROUP_MIN, + NFP_FL_LAG_GROUP_MAX, GFP_KERNEL); + if (id < 0) { + nfp_flower_cmsg_warn(priv->app, + "No more bonding groups available\n"); + return ERR_PTR(id); + } + + group = kmalloc(sizeof(*group), GFP_KERNEL); + if (!group) { + ida_simple_remove(&lag->ida_handle, id); + return ERR_PTR(-ENOMEM); + } + + group->group_id = id; + group->master_ndev = master; + group->dirty = true; + group->offloaded = false; + group->to_remove = false; + group->to_destroy = false; + group->slave_cnt = 0; + group->group_inst = ++lag->global_inst; + list_add_tail(&group->list, &lag->group_list); + + return group; +} + +static struct nfp_fl_lag_group * +nfp_fl_lag_find_group_for_master_with_lag(struct nfp_fl_lag *lag, + struct net_device *master) +{ + struct nfp_fl_lag_group *entry; + + if (!master) + return NULL; + + list_for_each_entry(entry, &lag->group_list, list) + if (entry->master_ndev == master) + return entry; + + return NULL; +} + +int nfp_flower_lag_populate_pre_action(struct nfp_app *app, + struct net_device *master, + struct nfp_fl_pre_lag *pre_act) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_lag_group *group = NULL; + __be32 temp_vers; + + mutex_lock(&priv->nfp_lag.lock); + group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag, + master); + if (!group) { + mutex_unlock(&priv->nfp_lag.lock); + return -ENOENT; + } + + pre_act->group_id = cpu_to_be16(group->group_id); + temp_vers = cpu_to_be32(priv->nfp_lag.batch_ver << + NFP_FL_PRE_LAG_VER_OFF); + memcpy(pre_act->lag_version, &temp_vers, 3); + pre_act->instance = group->group_inst; + mutex_unlock(&priv->nfp_lag.lock); + + return 0; +} + +int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_lag_group *group = NULL; + int group_id = -ENOENT; + + mutex_lock(&priv->nfp_lag.lock); + group = nfp_fl_lag_find_group_for_master_with_lag(&priv->nfp_lag, + master); + if (group) + group_id = group->group_id; + mutex_unlock(&priv->nfp_lag.lock); + + return group_id; +} + +static int +nfp_fl_lag_config_group(struct nfp_fl_lag *lag, struct nfp_fl_lag_group *group, + struct net_device **active_members, + unsigned int member_cnt, enum nfp_fl_lag_batch *batch) +{ + struct nfp_flower_cmsg_lag_config *cmsg_payload; + struct nfp_flower_priv *priv; + unsigned long int flags; + unsigned int size, i; + struct sk_buff *skb; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + size = sizeof(*cmsg_payload) + sizeof(__be32) * member_cnt; + skb = nfp_flower_cmsg_alloc(priv->app, size, + NFP_FLOWER_CMSG_TYPE_LAG_CONFIG, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + cmsg_payload = nfp_flower_cmsg_get_data(skb); + flags = 0; + + /* Increment batch version for each new batch of config messages. */ + if (*batch == NFP_FL_LAG_BATCH_FIRST) { + flags |= NFP_FL_LAG_FIRST; + nfp_fl_increment_version(lag); + *batch = NFP_FL_LAG_BATCH_MEMBER; + } + + /* If it is a reset msg then it is also the end of the batch. 
*/
+	if (lag->rst_cfg) {
+		flags |= NFP_FL_LAG_RESET;
+		*batch = NFP_FL_LAG_BATCH_FINISHED;
+	}
+
+	/* To signal the end of a batch, both the switch and last flags are set
+	 * and the reserved SYNC group ID is used.
+	 */
+	if (*batch == NFP_FL_LAG_BATCH_FINISHED) {
+		flags |= NFP_FL_LAG_SWITCH | NFP_FL_LAG_LAST;
+		lag->rst_cfg = false;
+		cmsg_payload->group_id = cpu_to_be32(NFP_FL_LAG_SYNC_ID);
+		cmsg_payload->group_inst = 0;
+	} else {
+		cmsg_payload->group_id = cpu_to_be32(group->group_id);
+		cmsg_payload->group_inst = cpu_to_be32(group->group_inst);
+	}
+
+	cmsg_payload->reserved[0] = 0;
+	cmsg_payload->reserved[1] = 0;
+	cmsg_payload->ttl = NFP_FL_LAG_HOST_TTL;
+	cmsg_payload->ctrl_flags = flags;
+	cmsg_payload->batch_ver = cpu_to_be32(lag->batch_ver);
+	cmsg_payload->pkt_number = cpu_to_be32(nfp_fl_get_next_pkt_number(lag));
+
+	for (i = 0; i < member_cnt; i++)
+		cmsg_payload->members[i] =
+			cpu_to_be32(nfp_repr_get_port_id(active_members[i]));
+
+	nfp_ctrl_tx(priv->app->ctrl, skb);
+	return 0;
+}
+
+static void nfp_fl_lag_do_work(struct work_struct *work)
+{
+	enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST;
+	struct nfp_fl_lag_group *entry, *storage;
+	struct delayed_work *delayed_work;
+	struct nfp_flower_priv *priv;
+	struct nfp_fl_lag *lag;
+	int err;
+
+	delayed_work = to_delayed_work(work);
+	lag = container_of(delayed_work, struct nfp_fl_lag, work);
+	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+	mutex_lock(&lag->lock);
+	list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
+		struct net_device *iter_netdev, **acti_netdevs;
+		struct nfp_flower_repr_priv *repr_priv;
+		int active_count = 0, slaves = 0;
+		struct nfp_repr *repr;
+		unsigned long *flags;
+
+		if (entry->to_remove) {
+			/* Active count of 0 deletes group on hw. */
+			err = nfp_fl_lag_config_group(lag, entry, NULL, 0,
+						      &batch);
+			if (!err) {
+				entry->to_remove = false;
+				entry->offloaded = false;
+			} else {
+				nfp_flower_cmsg_warn(priv->app,
						     "group delete failed\n");
+				schedule_delayed_work(&lag->work,
						      NFP_FL_LAG_DELAY);
+				continue;
+			}
+
+			if (entry->to_destroy) {
+				ida_simple_remove(&lag->ida_handle,
						  entry->group_id);
+				list_del(&entry->list);
+				kfree(entry);
+			}
+			continue;
+		}
+
+		acti_netdevs = kmalloc_array(entry->slave_cnt,
					     sizeof(*acti_netdevs), GFP_KERNEL);
+
+		/* Include sanity check in the loop. It may be that a bond has
+		 * changed between processing the last notification and the
+		 * work queue triggering. If the number of slaves has changed
+		 * or it now contains netdevs that cannot be offloaded, ignore
+		 * the group until pending notifications are processed.
+		 */
+		rcu_read_lock();
+		for_each_netdev_in_bond_rcu(entry->master_ndev, iter_netdev) {
+			if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
+				slaves = 0;
+				break;
+			}
+
+			repr = netdev_priv(iter_netdev);
+
+			if (repr->app != priv->app) {
+				slaves = 0;
+				break;
+			}
+
+			slaves++;
+			if (slaves > entry->slave_cnt)
+				break;
+
+			/* Check the ports for state changes.
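+			 * The lag_port_flags read here are produced by
+			 * NETDEV_CHANGELOWERSTATE handling in
+			 * nfp_fl_lag_changels_event(); NFP_PORT_LAG_CHANGED
+			 * means a member changed state since the last sync,
+			 * so the group is marked dirty and re-pushed to FW.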
*/
+			repr_priv = repr->app_priv;
+			flags = &repr_priv->lag_port_flags;
+
+			if (*flags & NFP_PORT_LAG_CHANGED) {
+				*flags &= ~NFP_PORT_LAG_CHANGED;
+				entry->dirty = true;
+			}
+
+			if ((*flags & NFP_PORT_LAG_TX_ENABLED) &&
+			    (*flags & NFP_PORT_LAG_LINK_UP))
+				acti_netdevs[active_count++] = iter_netdev;
+		}
+		rcu_read_unlock();
+
+		if (slaves != entry->slave_cnt || !entry->dirty) {
+			kfree(acti_netdevs);
+			continue;
+		}
+
+		err = nfp_fl_lag_config_group(lag, entry, acti_netdevs,
					      active_count, &batch);
+		if (!err) {
+			entry->offloaded = true;
+			entry->dirty = false;
+		} else {
+			nfp_flower_cmsg_warn(priv->app,
					     "group offload failed\n");
+			schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+		}
+
+		kfree(acti_netdevs);
+	}
+
+	/* End the config batch if at least one packet has been batched. */
+	if (batch == NFP_FL_LAG_BATCH_MEMBER) {
+		batch = NFP_FL_LAG_BATCH_FINISHED;
+		err = nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch);
+		if (err)
+			nfp_flower_cmsg_warn(priv->app,
					     "group batch end cmsg failed\n");
+	}
+
+	mutex_unlock(&lag->lock);
+}
+
+static int
+nfp_fl_lag_put_unprocessed(struct nfp_fl_lag *lag, struct sk_buff *skb)
+{
+	struct nfp_flower_cmsg_lag_config *cmsg_payload;
+
+	cmsg_payload = nfp_flower_cmsg_get_data(skb);
+	if (be32_to_cpu(cmsg_payload->group_id) >= NFP_FL_LAG_GROUP_MAX)
+		return -EINVAL;
+
+	/* Drop cmsg retrans if storage limit is exceeded to prevent
+	 * overloading. If the fw notices that expected messages have not been
+	 * received in a given time block, it will request a full resync.
+	 */
+	if (skb_queue_len(&lag->retrans_skbs) >= NFP_FL_LAG_RETRANS_LIMIT)
+		return -ENOSPC;
+
+	__skb_queue_tail(&lag->retrans_skbs, skb);
+
+	return 0;
+}
+
+static void nfp_fl_send_unprocessed(struct nfp_fl_lag *lag)
+{
+	struct nfp_flower_priv *priv;
+	struct sk_buff *skb;
+
+	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+	while ((skb = __skb_dequeue(&lag->retrans_skbs)))
+		nfp_ctrl_tx(priv->app->ctrl, skb);
+}
+
+bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb)
+{
+	struct nfp_flower_cmsg_lag_config *cmsg_payload;
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_fl_lag_group *group_entry;
+	unsigned long int flags;
+	bool store_skb = false;
+	int err;
+
+	cmsg_payload = nfp_flower_cmsg_get_data(skb);
+	flags = cmsg_payload->ctrl_flags;
+
+	/* Note the intentional fall through below. If DATA and XON are both
+	 * set, the message will be stored and sent again with the rest of the
+	 * unprocessed messages list.
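+	 * For example, a message flagged DATA|XON is queued first
+	 * (store_skb becomes true below) and then immediately drained
+	 * again by nfp_fl_send_unprocessed() together with anything
+	 * queued earlier, so the original ordering is kept.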
+	 */
+
+	/* Store */
+	if (flags & NFP_FL_LAG_DATA)
+		if (!nfp_fl_lag_put_unprocessed(&priv->nfp_lag, skb))
+			store_skb = true;
+
+	/* Send stored */
+	if (flags & NFP_FL_LAG_XON)
+		nfp_fl_send_unprocessed(&priv->nfp_lag);
+
+	/* Resend all */
+	if (flags & NFP_FL_LAG_SYNC) {
+		/* To resend all config:
+		 * 1) Clear all unprocessed messages
+		 * 2) Mark all groups dirty
+		 * 3) Reset NFP group config
+		 * 4) Schedule a LAG config update
+		 */
+
+		__skb_queue_purge(&priv->nfp_lag.retrans_skbs);
+
+		mutex_lock(&priv->nfp_lag.lock);
+		list_for_each_entry(group_entry, &priv->nfp_lag.group_list,
				    list)
+			group_entry->dirty = true;
+
+		err = nfp_flower_lag_reset(&priv->nfp_lag);
+		if (err)
+			nfp_flower_cmsg_warn(priv->app,
					     "mem err in group reset msg\n");
+		mutex_unlock(&priv->nfp_lag.lock);
+
+		schedule_delayed_work(&priv->nfp_lag.work, 0);
+	}
+
+	return store_skb;
+}
+
+static void
+nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
				 struct nfp_fl_lag_group *group)
+{
+	group->to_remove = true;
+
+	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+}
+
+static int
+nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
				 struct net_device *master)
+{
+	struct nfp_fl_lag_group *group;
+
+	mutex_lock(&lag->lock);
+	group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
+	if (!group) {
+		mutex_unlock(&lag->lock);
+		return -ENOENT;
+	}
+
+	group->to_remove = true;
+	group->to_destroy = true;
+	mutex_unlock(&lag->lock);
+
+	schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
+	return 0;
+}
+
+static int
+nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
			     struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *upper = info->upper_dev, *iter_netdev;
+	struct netdev_lag_upper_info *lag_upper_info;
+	struct nfp_fl_lag_group *group;
+	struct nfp_flower_priv *priv;
+	unsigned int slave_count = 0;
+	bool can_offload = true;
+	struct nfp_repr *repr;
+
+	if (!netif_is_lag_master(upper))
+		return 0;
+
+	priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(upper, iter_netdev) {
+		if (!nfp_netdev_is_nfp_repr(iter_netdev)) {
+			can_offload = false;
+			break;
+		}
+		repr = netdev_priv(iter_netdev);
+
+		/* Ensure all ports are created by the same app/on same card. */
+		if (repr->app != priv->app) {
+			can_offload = false;
+			break;
+		}
+
+		slave_count++;
+	}
+	rcu_read_unlock();
+
+	lag_upper_info = info->upper_info;
+
+	/* Firmware supports active/backup and L3/L4 hash bonds. */
+	if (lag_upper_info &&
+	    lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+	    (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH ||
+	     (lag_upper_info->hash_type != NETDEV_LAG_HASH_L34 &&
+	      lag_upper_info->hash_type != NETDEV_LAG_HASH_E34))) {
+		can_offload = false;
+		nfp_flower_cmsg_warn(priv->app,
				     "Unable to offload tx_type %u hash %u\n",
				     lag_upper_info->tx_type,
				     lag_upper_info->hash_type);
+	}
+
+	mutex_lock(&lag->lock);
+	group = nfp_fl_lag_find_group_for_master_with_lag(lag, upper);
+
+	if (slave_count == 0 || !can_offload) {
+		/* Cannot offload the group - remove if previously offloaded. */
+		if (group && group->offloaded)
+			nfp_fl_lag_schedule_group_remove(lag, group);
+
+		mutex_unlock(&lag->lock);
+		return 0;
+	}
+
+	if (!group) {
+		group = nfp_fl_lag_group_create(lag, upper);
+		if (IS_ERR(group)) {
+			mutex_unlock(&lag->lock);
+			return PTR_ERR(group);
+		}
+	}
+
+	group->dirty = true;
+	group->slave_cnt = slave_count;
+
+	/* Group may have been on queue for removal but is now offloadable.
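+	 * A CHANGEUPPER notification can arrive while a scheduled removal
+	 * is still sitting on the delayed work queue; clearing to_remove
+	 * here cancels that pending delete before the worker runs.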
*/ + group->to_remove = false; + mutex_unlock(&lag->lock); + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + return 0; +} + +static int +nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_flower_priv *priv; + struct nfp_repr *repr; + unsigned long *flags; + + if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev)) + return 0; + + lag_lower_info = info->lower_state_info; + if (!lag_lower_info) + return 0; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + repr = netdev_priv(netdev); + + /* Verify that the repr is associated with this app. */ + if (repr->app != priv->app) + return 0; + + repr_priv = repr->app_priv; + flags = &repr_priv->lag_port_flags; + + mutex_lock(&lag->lock); + if (lag_lower_info->link_up) + *flags |= NFP_PORT_LAG_LINK_UP; + else + *flags &= ~NFP_PORT_LAG_LINK_UP; + + if (lag_lower_info->tx_enabled) + *flags |= NFP_PORT_LAG_TX_ENABLED; + else + *flags &= ~NFP_PORT_LAG_TX_ENABLED; + + *flags |= NFP_PORT_LAG_CHANGED; + mutex_unlock(&lag->lock); + + schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); + return 0; +} + +static int +nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct net_device *netdev; + struct nfp_fl_lag *lag; + int err; + + netdev = netdev_notifier_info_to_dev(ptr); + lag = container_of(nb, struct nfp_fl_lag, lag_nb); + + switch (event) { + case NETDEV_CHANGEUPPER: + err = nfp_fl_lag_changeupper_event(lag, ptr); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + case NETDEV_CHANGELOWERSTATE: + err = nfp_fl_lag_changels_event(lag, netdev, ptr); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + case NETDEV_UNREGISTER: + if (netif_is_bond_master(netdev)) { + err = nfp_fl_lag_schedule_group_delete(lag, netdev); + if (err) + return NOTIFY_BAD; + return NOTIFY_OK; + } + } + + return NOTIFY_DONE; +} + +int nfp_flower_lag_reset(struct nfp_fl_lag *lag) +{ + enum nfp_fl_lag_batch batch = NFP_FL_LAG_BATCH_FIRST; + + lag->rst_cfg = true; + return nfp_fl_lag_config_group(lag, NULL, NULL, 0, &batch); +} + +void nfp_flower_lag_init(struct nfp_fl_lag *lag) +{ + INIT_DELAYED_WORK(&lag->work, nfp_fl_lag_do_work); + INIT_LIST_HEAD(&lag->group_list); + mutex_init(&lag->lock); + ida_init(&lag->ida_handle); + + __skb_queue_head_init(&lag->retrans_skbs); + + /* 0 is a reserved batch version so increment to first valid value. */ + nfp_fl_increment_version(lag); + + lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event; +} + +void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) +{ + struct nfp_fl_lag_group *entry, *storage; + + cancel_delayed_work_sync(&lag->work); + + __skb_queue_purge(&lag->retrans_skbs); + + /* Remove all groups. 
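+ * The delayed work was cancelled above, so no group config update can + * race with this teardown.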
*/ + mutex_lock(&lag->lock); + list_for_each_entry_safe(entry, storage, &lag->group_list, list) { + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&lag->lock); + mutex_destroy(&lag->lock); + ida_destroy(&lag->ida_handle); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 84e3b9f5abb1..19cfa162ac65 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -185,6 +185,10 @@ nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { + struct nfp_repr *repr = netdev_priv(netdev); + + kfree(repr->app_priv); + tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, netdev_priv(netdev)); } @@ -225,7 +229,9 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; + struct nfp_flower_repr_priv *repr_priv; enum nfp_port_type port_type; + struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; int i, err, reify_cnt; const u8 queue = 0; @@ -247,12 +253,25 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, err = -ENOMEM; goto err_reprs_clean; } - RCU_INIT_POINTER(reprs->reprs[i], repr); + + repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); + if (!repr_priv) { + err = -ENOMEM; + goto err_reprs_clean; + } + + nfp_repr = netdev_priv(repr); + nfp_repr->app_priv = repr_priv; /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); port = nfp_port_alloc(app, port_type, repr); + if (IS_ERR(port)) { + err = PTR_ERR(port); + nfp_repr_free(repr); + goto err_reprs_clean; + } if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; port->vnic = priv->nn->dp.ctrl_bar; @@ -271,9 +290,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } + RCU_INIT_POINTER(reprs->reprs[i], repr); nfp_info(app->cpp, "%s%d Representor(%s) created\n", repr_type == NFP_REPR_TYPE_PF ? 
"PF" : "VF", i, repr->name); @@ -318,6 +339,8 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; int err, reify_cnt; @@ -344,16 +367,26 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) err = -ENOMEM; goto err_reprs_clean; } - RCU_INIT_POINTER(reprs->reprs[phys_port], repr); + + repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL); + if (!repr_priv) { + err = -ENOMEM; + goto err_reprs_clean; + } + + nfp_repr = netdev_priv(repr); + nfp_repr->app_priv = repr_priv; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { err = PTR_ERR(port); + nfp_repr_free(repr); goto err_reprs_clean; } err = nfp_port_init_phy_port(app->pf, app, port, i); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } @@ -365,6 +398,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) cmsg_port_id, port, priv->nn->dp.netdev); if (err) { nfp_port_free(port); + nfp_repr_free(repr); goto err_reprs_clean; } @@ -373,6 +407,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) eth_tbl->ports[i].base, phys_port); + RCU_INIT_POINTER(reprs->reprs[phys_port], repr); nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, repr->name); } @@ -537,8 +572,22 @@ static int nfp_flower_init(struct nfp_app *app) else app_priv->flower_ext_feats = features; + /* Tell the firmware that the driver supports lag. */ + err = nfp_rtsym_write_le(app->pf->rtbl, + "_abi_flower_balance_sync_enable", 1); + if (!err) { + app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG; + nfp_flower_lag_init(&app_priv->nfp_lag); + } else if (err == -ENOENT) { + nfp_warn(app->cpp, "LAG not supported by FW.\n"); + } else { + goto err_cleanup_metadata; + } + return 0; +err_cleanup_metadata: + nfp_flower_metadata_cleanup(app); err_free_app_priv: vfree(app->priv); return err; @@ -552,6 +601,9 @@ static void nfp_flower_clean(struct nfp_app *app) skb_queue_purge(&app_priv->cmsg_skbs_low); flush_work(&app_priv->cmsg_work); + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + nfp_flower_lag_cleanup(&app_priv->nfp_lag); + nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; @@ -618,11 +670,29 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_start(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + int err; + + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + err = nfp_flower_lag_reset(&app_priv->nfp_lag); + if (err) + return err; + + err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + if (err) + return err; + } + return nfp_tunnel_config_start(app); } static void nfp_flower_stop(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) + unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + nfp_tunnel_config_stop(app); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c67e1b54c614..bbe5764d26cb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -43,10 +43,13 @@ #include <net/pkt_cls.h> #include <net/tcp.h> #include <linux/workqueue.h> +#include <linux/idr.h> +struct 
nfp_fl_pre_lag; struct net_device; struct nfp_app; +#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) #define NFP_FL_STATS_ENTRY_RS BIT(20) #define NFP_FL_STATS_ELEM_RS 4 #define NFP_FL_REPEATED_HASH_MAX BIT(17) @@ -66,6 +69,7 @@ struct nfp_app; /* Extra features bitmap. */ #define NFP_FL_FEATS_GENEVE BIT(0) #define NFP_FL_NBI_MTU_SETTING BIT(1) +#define NFP_FL_FEATS_LAG BIT(31) struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; @@ -96,6 +100,33 @@ struct nfp_mtu_conf { }; /** + * struct nfp_fl_lag - Flower APP priv data for link aggregation + * @lag_nb: Notifier to track master/slave events + * @work: Work queue for writing configs to the HW + * @lock: Lock to protect lag_group_list + * @group_list: List of all master/slave groups offloaded + * @ida_handle: IDA to handle group ids + * @pkt_num: Incremented for each config packet sent + * @batch_ver: Incremented for each batch of config packets + * @global_inst: Instance allocator for groups + * @rst_cfg: Marker to reset HW LAG config + * @retrans_skbs: Cmsgs that could not be processed by HW and require + * retransmission + */ +struct nfp_fl_lag { + struct notifier_block lag_nb; + struct delayed_work work; + struct mutex lock; + struct list_head group_list; + struct ida ida_handle; + unsigned int pkt_num; + unsigned int batch_ver; + u8 global_inst; + bool rst_cfg; + struct sk_buff_head retrans_skbs; +}; + +/** * struct nfp_flower_priv - Flower APP per-vNIC priv data * @app: Back pointer to app * @nn: Pointer to vNIC @@ -127,6 +158,7 @@ struct nfp_mtu_conf { * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value + * @nfp_lag: Link aggregation data block */ struct nfp_flower_priv { struct nfp_app *app; @@ -156,6 +188,15 @@ struct nfp_flower_priv { atomic_t reify_replies; wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; + struct nfp_fl_lag nfp_lag; +}; + +/** + * struct nfp_flower_repr_priv - Flower APP per-repr priv data + * @lag_port_flags: Extended port flags to record lag state of repr + */ +struct nfp_flower_repr_priv { + unsigned long lag_port_flags; }; struct nfp_fl_key_ls { @@ -189,9 +230,11 @@ struct nfp_fl_payload { spinlock_t lock; /* lock stats */ struct nfp_fl_stats stats; __be32 nfp_tun_ipv4_addr; + struct net_device *ingress_dev; char *unmasked_data; char *mask_data; char *action_data; + bool ingress_offload; }; struct nfp_fl_stats_frame { @@ -211,17 +254,20 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type); -int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_action(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow); int nfp_compile_flow_metadata(struct nfp_app *app, struct tc_cls_flower_offload *flow, - struct nfp_fl_payload *nfp_flow); + struct nfp_fl_payload *nfp_flow, + struct net_device *netdev); int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload *nfp_flow); struct nfp_fl_payload * -nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); +nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, + struct net_device *netdev, __be32 host_ctx); struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); @@ -236,5 +282,14 @@ void nfp_tunnel_request_route(struct nfp_app *app, 
struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, void *cb_priv); +void nfp_flower_lag_init(struct nfp_fl_lag *lag); +void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); +int nfp_flower_lag_reset(struct nfp_fl_lag *lag); +bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb); +int nfp_flower_lag_populate_pre_action(struct nfp_app *app, + struct net_device *master, + struct nfp_fl_pre_lag *pre_act); +int nfp_flower_lag_get_output_id(struct nfp_app *app, + struct net_device *master); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index db977cf8e933..21668aa435e8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -99,14 +99,18 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) /* Must be called with either RTNL or rcu_read_lock */ struct nfp_fl_payload * -nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie) +nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, + struct net_device *netdev, __be32 host_ctx) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flower_entry; hash_for_each_possible_rcu(priv->flow_table, flower_entry, link, tc_flower_cookie) - if (flower_entry->tc_flower_cookie == tc_flower_cookie) + if (flower_entry->tc_flower_cookie == tc_flower_cookie && + (!netdev || flower_entry->ingress_dev == netdev) && + (host_ctx == NFP_FL_STATS_CTX_DONT_CARE || + flower_entry->meta.host_ctx_id == host_ctx)) return flower_entry; return NULL; @@ -121,13 +125,11 @@ nfp_flower_update_stats(struct nfp_app *app, struct nfp_fl_stats_frame *stats) flower_cookie = be64_to_cpu(stats->stats_cookie); rcu_read_lock(); - nfp_flow = nfp_flower_search_fl_table(app, flower_cookie); + nfp_flow = nfp_flower_search_fl_table(app, flower_cookie, NULL, + stats->stats_con_id); if (!nfp_flow) goto exit_rcu_unlock; - if (nfp_flow->meta.host_ctx_id != stats->stats_con_id) - goto exit_rcu_unlock; - spin_lock(&nfp_flow->lock); nfp_flow->stats.pkts += be32_to_cpu(stats->pkt_count); nfp_flow->stats.bytes += be64_to_cpu(stats->byte_count); @@ -317,7 +319,8 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, int nfp_compile_flow_metadata(struct nfp_app *app, struct tc_cls_flower_offload *flow, - struct nfp_fl_payload *nfp_flow) + struct nfp_fl_payload *nfp_flow, + struct net_device *netdev) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *check_entry; @@ -348,7 +351,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app, nfp_flow->stats.bytes = 0; nfp_flow->stats.used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie); + check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, + NFP_FL_STATS_CTX_DONT_CARE); if (check_entry) { if (nfp_release_stats_entry(app, stats_cxt)) return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 114d2ab02a38..c42e64f32333 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -345,7 +345,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } static struct nfp_fl_payload * -nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) +nfp_flower_allocate_new(struct 
nfp_fl_key_ls *key_layer, bool egress) { struct nfp_fl_payload *flow_pay; @@ -371,6 +371,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) flow_pay->meta.flags = 0; spin_lock_init(&flow_pay->lock); + flow_pay->ingress_offload = !egress; + return flow_pay; err_free_mask: @@ -402,8 +404,20 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; + struct net_device *ingr_dev; int err; + ingr_dev = egress ? NULL : netdev; + flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); + if (flow_pay) { + /* Ignore as duplicate if it has been added by different cb. */ + if (flow_pay->ingress_offload && egress) + return 0; + else + return -EOPNOTSUPP; + } + key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); if (!key_layer) return -ENOMEM; @@ -413,22 +427,25 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_free_key_ls; - flow_pay = nfp_flower_allocate_new(key_layer); + flow_pay = nfp_flower_allocate_new(key_layer, egress); if (!flow_pay) { err = -ENOMEM; goto err_free_key_ls; } + flow_pay->ingress_dev = egress ? NULL : netdev; + err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, tun_type); if (err) goto err_destroy_flow; - err = nfp_flower_compile_action(flow, netdev, flow_pay); + err = nfp_flower_compile_action(app, flow, netdev, flow_pay); if (err) goto err_destroy_flow; - err = nfp_compile_flow_metadata(app, flow, flow_pay); + err = nfp_compile_flow_metadata(app, flow, flow_pay, + flow_pay->ingress_dev); if (err) goto err_destroy_flow; @@ -462,6 +479,7 @@ err_free_key_ls: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure + * @egress: Netdev is the egress dev. * * Removes a flow from the repeated hash structure and clears the * action payload. @@ -470,15 +488,18 @@ err_free_key_ls: */ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow) + struct tc_cls_flower_offload *flow, bool egress) { struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_fl_payload *nfp_flow; + struct net_device *ingr_dev; int err; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie); + ingr_dev = egress ? NULL : netdev; + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) - return -ENOENT; + return egress ? 0 : -ENOENT; err = nfp_modify_flow_metadata(app, nfp_flow); if (err) @@ -505,7 +526,9 @@ err_free_flow: /** * nfp_flower_get_stats() - Populates flow stats obtained from hardware. * @app: Pointer to the APP handle + * @netdev: Netdev structure. * @flow: TC flower classifier offload structure + * @egress: Netdev is the egress dev. * * Populates a flow statistics structure which corresponds to a * specific flow. * * Return: negative value on error, 0 if stats populated successfully. */ static int -nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow) +nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, + struct tc_cls_flower_offload *flow, bool egress) { struct nfp_fl_payload *nfp_flow; + struct net_device *ingr_dev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie); + ingr_dev = egress ? 
NULL : netdev; + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, + NFP_FL_STATS_CTX_DONT_CARE); if (!nfp_flow) return -EINVAL; + if (nfp_flow->ingress_offload && egress) + return 0; + spin_lock_bh(&nfp_flow->lock); tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, nfp_flow->stats.pkts, nfp_flow->stats.used); @@ -543,9 +573,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, case TC_CLSFLOWER_REPLACE: return nfp_flower_add_offload(app, netdev, flower, egress); case TC_CLSFLOWER_DESTROY: - return nfp_flower_del_offload(app, netdev, flower); + return nfp_flower_del_offload(app, netdev, flower, egress); case TC_CLSFLOWER_STATS: - return nfp_flower_get_stats(app, flower); + return nfp_flower_get_stats(app, netdev, flower, egress); } return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_abi.h b/drivers/net/ethernet/netronome/nfp/nfp_abi.h new file mode 100644 index 000000000000..8b56c27931bf --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_abi.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */ +/* + * Copyright (C) 2018 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __NFP_ABI__ +#define __NFP_ABI__ 1 + +#include <linux/types.h> + +#define NFP_MBOX_SYM_NAME "_abi_nfd_pf%u_mbox" +#define NFP_MBOX_SYM_MIN_SIZE 16 /* When no data needed */ + +#define NFP_MBOX_CMD 0x00 +#define NFP_MBOX_RET 0x04 +#define NFP_MBOX_DATA_LEN 0x08 +#define NFP_MBOX_RESERVED 0x0c +#define NFP_MBOX_DATA 0x10 + +/** + * enum nfp_mbox_cmd - PF mailbox commands + * + * @NFP_MBOX_NO_CMD: null command + * Used to indicate previous command has finished. + * + * @NFP_MBOX_POOL_GET: get shared buffer pool info/config + * Input - struct nfp_shared_buf_pool_id + * Output - struct nfp_shared_buf_pool_info_get + * + * @NFP_MBOX_POOL_SET: set shared buffer pool info/config + * Input - struct nfp_shared_buf_pool_info_set + * Output - None + * + * @NFP_MBOX_PCIE_ABM_ENABLE: enable PCIe-side advanced buffer management + * Enable advanced buffer management of the PCIe block. If ABM is disabled + * PCIe block maintains a very short queue of buffers and does tail drop. 
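+ * (frames that arrive once that short queue is full are simply dropped).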
+ * ABM allows more advanced buffering and priority control. + * Input - None + * Output - None + * + * @NFP_MBOX_PCIE_ABM_DISABLE: disable PCIe-side advanced buffer management + * Input - None + * Output - None + */ +enum nfp_mbox_cmd { + NFP_MBOX_NO_CMD = 0x00, + + NFP_MBOX_POOL_GET = 0x01, + NFP_MBOX_POOL_SET = 0x02, + + NFP_MBOX_PCIE_ABM_ENABLE = 0x03, + NFP_MBOX_PCIE_ABM_DISABLE = 0x04, +}; + +#define NFP_SHARED_BUF_COUNT_SYM_NAME "_abi_nfd_pf%u_sb_cnt" +#define NFP_SHARED_BUF_TABLE_SYM_NAME "_abi_nfd_pf%u_sb_tbl" + +/** + * struct nfp_shared_buf - NFP shared buffer description + * @id: numerical user-visible id of the shared buffer + * @size: size in bytes of the buffer + * @ingress_pools_count: number of ingress pools + * @egress_pools_count: number of egress pools + * @ingress_tc_count: number of ingress traffic classes + * @egress_tc_count: number of egress traffic classes + * @pool_size_unit: pool size may be in credits, each credit is + * @pool_size_unit bytes + */ +struct nfp_shared_buf { + __le32 id; + __le32 size; + __le16 ingress_pools_count; + __le16 egress_pools_count; + __le16 ingress_tc_count; + __le16 egress_tc_count; + + __le32 pool_size_unit; +}; + +/** + * struct nfp_shared_buf_pool_id - shared buffer pool identification + * @shared_buf: shared buffer id + * @pool: pool index + */ +struct nfp_shared_buf_pool_id { + __le32 shared_buf; + __le32 pool; +}; + +/** + * struct nfp_shared_buf_pool_info_get - struct devlink_sb_pool_info mirror + * @pool_type: one of enum devlink_sb_pool_type + * @size: pool size in units of SB's @pool_size_unit + * @threshold_type: one of enum devlink_sb_threshold_type + */ +struct nfp_shared_buf_pool_info_get { + __le32 pool_type; + __le32 size; + __le32 threshold_type; +}; + +/** + * struct nfp_shared_buf_pool_info_set - packed args of sb_pool_set + * @id: pool identification info + * @size: pool size in units of SB's @pool_size_unit + * @threshold_type: one of enum devlink_sb_threshold_type + */ +struct nfp_shared_buf_pool_info_set { + struct nfp_shared_buf_pool_id id; + __le32 size; + __le32 threshold_type; +}; + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 6aedef0ad433..f28b244f4ee7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2017 Netronome Systems, Inc. + * Copyright (C) 2017-2018 Netronome Systems, Inc. 
* * This software is dual licensed under the GNU General License Version 2, * June 1991 as shown in the file COPYING in the top-level directory of this @@ -43,6 +43,7 @@ #include "nfp_main.h" #include "nfp_net.h" #include "nfp_net_repr.h" +#include "nfp_port.h" static const struct nfp_app_type *apps[] = { [NFP_APP_CORE_NIC] = &app_nic, @@ -54,6 +55,9 @@ static const struct nfp_app_type *apps[] = { #ifdef CONFIG_NFP_APP_FLOWER [NFP_APP_FLOWER_NIC] = &app_flower, #endif +#ifdef CONFIG_NFP_APP_ABM_NIC + [NFP_APP_ACTIVE_BUFFER_MGMT_NIC] = &app_abm, +#endif }; struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) @@ -82,6 +86,27 @@ const char *nfp_app_mip_name(struct nfp_app *app) return nfp_mip_name(app->pf->mip); } +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats) + return data; + return port->app->type->port_get_stats(port->app, port, data); +} + +int nfp_app_port_get_stats_count(struct nfp_port *port) +{ + if (!port || !port->app || !port->app->type->port_get_stats_count) + return 0; + return port->app->type->port_get_stats_count(port->app, port); +} + +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data) +{ + if (!port || !port->app || !port->app->type->port_get_stats_strings) + return data; + return port->app->type->port_get_stats_strings(port->app, port, data); +} + struct sk_buff * nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 2d9cb2528fc7..ee74caacb015 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -57,11 +57,13 @@ enum nfp_app_id { NFP_APP_CORE_NIC = 0x1, NFP_APP_BPF_NIC = 0x2, NFP_APP_FLOWER_NIC = 0x3, + NFP_APP_ACTIVE_BUFFER_MGMT_NIC = 0x4, }; extern const struct nfp_app_type app_nic; extern const struct nfp_app_type app_bpf; extern const struct nfp_app_type app_flower; +extern const struct nfp_app_type app_abm; /** * struct nfp_app_type - application definition @@ -88,6 +90,9 @@ extern const struct nfp_app_type app_flower; * @repr_stop: representor netdev stop callback * @check_mtu: MTU change request on a netdev (verify it is valid) * @repr_change_mtu: MTU change request on repr (make and verify change) + * @port_get_stats: get extra ethtool statistics for a port + * @port_get_stats_count: get count of extra statistics for a port + * @port_get_stats_strings: get strings for extra statistics * @start: start application logic * @stop: stop application logic * @ctrl_msg_rx: control message handler @@ -95,6 +100,7 @@ extern const struct nfp_app_type app_flower; * @bpf: BPF ndo offload-related calls * @xdp_offload: offload an XDP program * @eswitch_mode_get: get SR-IOV eswitch mode + * @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock) * @sriov_enable: app-specific sriov initialisation * @sriov_disable: app-specific sriov clean-up * @repr_get: get representor netdev @@ -129,6 +135,12 @@ struct nfp_app_type { int (*repr_change_mtu)(struct nfp_app *app, struct net_device *netdev, int new_mtu); + u64 *(*port_get_stats)(struct nfp_app *app, + struct nfp_port *port, u64 *data); + int (*port_get_stats_count)(struct nfp_app *app, struct nfp_port *port); + u8 *(*port_get_stats_strings)(struct nfp_app *app, + struct nfp_port *port, u8 *data); + int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); @@ -146,6 +158,7 @@ struct nfp_app_type { void 
(*sriov_disable)(struct nfp_app *app); enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app); + int (*eswitch_mode_set)(struct nfp_app *app, u16 mode); struct net_device *(*repr_get)(struct nfp_app *app, u32 id); }; @@ -370,6 +383,13 @@ static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode) return 0; } +static inline int nfp_app_eswitch_mode_set(struct nfp_app *app, u16 mode) +{ + if (!app->type->eswitch_mode_set) + return -EOPNOTSUPP; + return app->type->eswitch_mode_set(app, mode); +} + static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs) { if (!app || !app->type->sriov_enable) @@ -393,6 +413,10 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); +u64 *nfp_app_port_get_stats(struct nfp_port *port, u64 *data); +int nfp_app_port_get_stats_count(struct nfp_port *port); +u8 *nfp_app_port_get_stats_strings(struct nfp_port *port, u8 *data); + struct nfp_reprs * nfp_reprs_get_locked(struct nfp_app *app, enum nfp_repr_type type); struct nfp_reprs * @@ -410,5 +434,7 @@ void nfp_app_free(struct nfp_app *app); int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id); +int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, + struct nfp_net *nn, unsigned int id); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c index b9618c37403f..e2dfe4f168bb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app_nic.c @@ -38,9 +38,8 @@ #include "nfp_net.h" #include "nfp_port.h" -static int -nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, - struct nfp_net *nn, unsigned int id) +int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, + struct nfp_net *nn, unsigned int id) { int err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 5f2b2f24f4fa..f6677bc9875a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -72,8 +72,21 @@ #define OP_BR_ADDR_LO 0x007ffc00000ULL #define OP_BR_ADDR_HI 0x10000000000ULL -#define nfp_is_br(_insn) \ - (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE) +#define OP_BR_BIT_BASE 0x0d000000000ULL +#define OP_BR_BIT_BASE_MASK 0x0f800080300ULL +#define OP_BR_BIT_A_SRC 0x000000000ffULL +#define OP_BR_BIT_B_SRC 0x0000003fc00ULL +#define OP_BR_BIT_BV 0x00000040000ULL +#define OP_BR_BIT_SRC_LMEXTN 0x40000000000ULL +#define OP_BR_BIT_DEFBR OP_BR_DEFBR +#define OP_BR_BIT_ADDR_LO OP_BR_ADDR_LO +#define OP_BR_BIT_ADDR_HI OP_BR_ADDR_HI + +static inline bool nfp_is_br(u64 insn) +{ + return (insn & OP_BR_BASE_MASK) == OP_BR_BASE || + (insn & OP_BR_BIT_BASE_MASK) == OP_BR_BIT_BASE; +} enum br_mask { BR_BEQ = 0x00, @@ -161,6 +174,7 @@ enum shf_op { SHF_OP_NONE = 0, SHF_OP_AND = 2, SHF_OP_OR = 5, + SHF_OP_ASHR = 6, }; enum shf_sc { @@ -183,16 +197,18 @@ enum shf_sc { #define OP_ALU_DST_LMEXTN 0x80000000000ULL enum alu_op { - ALU_OP_NONE = 0x00, - ALU_OP_ADD = 0x01, - ALU_OP_NOT = 0x04, - ALU_OP_ADD_2B = 0x05, - ALU_OP_AND = 0x08, - ALU_OP_SUB_C = 0x0d, - ALU_OP_ADD_C = 0x11, - ALU_OP_OR = 0x14, - ALU_OP_SUB = 0x15, - ALU_OP_XOR = 0x18, + ALU_OP_NONE = 0x00, + ALU_OP_ADD = 0x01, + ALU_OP_NOT = 0x04, + ALU_OP_ADD_2B = 0x05, + ALU_OP_AND = 0x08, + ALU_OP_AND_NOT_A = 0x0c, + ALU_OP_SUB_C = 0x0d, + ALU_OP_AND_NOT_B = 0x10, + ALU_OP_ADD_C = 
0x11, + ALU_OP_OR = 0x14, + ALU_OP_SUB = 0x15, + ALU_OP_XOR = 0x18, }; enum alu_dst_ab { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index eb0fc614673d..db463e20a876 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -92,7 +92,7 @@ nfp_devlink_set_lanes(struct nfp_pf *pf, unsigned int idx, unsigned int lanes) static int nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, - unsigned int count) + unsigned int count, struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; @@ -123,7 +123,8 @@ out: } static int -nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index) +nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index, + struct netlink_ext_ack *extack) { struct nfp_pf *pf = devlink_priv(devlink); struct nfp_eth_table_port eth_port; @@ -149,6 +150,26 @@ out: return ret; } +static int +nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, struct devlink_sb_pool_info *pool_info) +{ + struct nfp_pf *pf = devlink_priv(devlink); + + return nfp_shared_buf_pool_get(pf, sb_index, pool_index, pool_info); +} + +static int +nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index, + u16 pool_index, + u32 size, enum devlink_sb_threshold_type threshold_type) +{ + struct nfp_pf *pf = devlink_priv(devlink); + + return nfp_shared_buf_pool_set(pf, sb_index, pool_index, + size, threshold_type); +} + static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct nfp_pf *pf = devlink_priv(devlink); @@ -156,10 +177,25 @@ static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) return nfp_app_eswitch_mode_get(pf->app, mode); } +static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct nfp_pf *pf = devlink_priv(devlink); + int ret; + + mutex_lock(&pf->lock); + ret = nfp_app_eswitch_mode_set(pf->app, mode); + mutex_unlock(&pf->lock); + + return ret; +} + const struct devlink_ops nfp_devlink_ops = { .port_split = nfp_devlink_port_split, .port_unsplit = nfp_devlink_port_unsplit, + .sb_pool_get = nfp_devlink_sb_pool_get, + .sb_pool_set = nfp_devlink_sb_pool_set, .eswitch_mode_get = nfp_devlink_eswitch_mode_get, + .eswitch_mode_set = nfp_devlink_eswitch_mode_set, }; int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) @@ -175,8 +211,9 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; devlink_port_type_eth_set(&port->dl_port, port->netdev); - if (eth_port.is_split) - devlink_port_split_set(&port->dl_port, eth_port.label_port); + devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, + eth_port.label_port, eth_port.is_split, + eth_port.label_subport); devlink = priv_to_devlink(app->pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index c4b1f344b4da..46b76d5a726c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -55,6 +55,7 @@ #include "nfpcore/nfp6000_pcie.h" +#include "nfp_abi.h" #include "nfp_app.h" #include "nfp_main.h" #include "nfp_net.h" @@ -75,6 +76,122 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, + unsigned int 
default_val) +{ + char name[256]; + int err = 0; + u64 val; + + snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + + val = nfp_rtsym_read_le(pf->rtbl, name, &err); + if (err) { + if (err == -ENOENT) + return default_val; + nfp_err(pf->cpp, "Unable to read symbol %s\n", name); + return err; + } + + return val; +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + char pf_symbol[256]; + + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, + nfp_cppcore_pcie_unit(pf->cpp)); + + return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); +} + +/* Callers should hold the devlink instance lock */ +int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, + void *out_data, u64 out_length) +{ + unsigned long long addr; + unsigned long err_at; + u64 max_data_sz; + u32 val = 0; + u32 cpp_id; + int n, err; + + if (!pf->mbox) + return -EOPNOTSUPP; + + cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0, + pf->mbox->domain); + addr = pf->mbox->addr; + max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE; + + /* Check if cmd field is clear */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + if (err || val) { + nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", + cmd, val, err); + return err ?: -EBUSY; + } + + in_length = min(in_length, max_data_sz); + n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, + in_data, in_length); + if (n != in_length) + return -EIO; + /* Write data_len and wipe reserved */ + err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, + in_length); + if (err) + return err; + + /* Read back for ordering */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + if (err) + return err; + + /* Write cmd and wipe return value */ + err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd); + if (err) + return err; + + err_at = jiffies + 5 * HZ; + while (true) { + /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + if (err) + return err; + if (!val) + break; + + if (time_is_before_eq_jiffies(err_at)) + return -ETIMEDOUT; + + msleep(5); + } + + /* Copy output if any (could be error info, do it before reading ret) */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + if (err) + return err; + + out_length = min_t(u32, val, min(out_length, max_data_sz)); + n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, + out_data, out_length); + if (n != out_length) + return -EIO; + + /* Check if there is an error */ + err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val); + if (err) + return err; + if (val) + return -val; + + return out_length; +} + static bool nfp_board_ready(struct nfp_pf *pf) { const char *cp; @@ -436,6 +553,25 @@ static void nfp_fw_unload(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static int nfp_pf_find_rtsyms(struct nfp_pf *pf) +{ + char pf_symbol[256]; + unsigned int pf_id; + + pf_id = nfp_cppcore_pcie_unit(pf->cpp); + + /* Optional per-PCI PF mailbox */ + snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); + pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); + if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) { + nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", + pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE); + return -EINVAL; + } + + return 0; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) 
{ @@ -486,6 +622,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } + err = nfp_resource_table_init(pf->cpp); + if (err) + goto err_cpp_free; + pf->hwinfo = nfp_hwinfo_read(pf->cpp); dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n", @@ -506,6 +646,10 @@ static int nfp_pci_probe(struct pci_dev *pdev, pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); + err = nfp_pf_find_rtsyms(pf); + if (err) + goto err_fw_unload; + pf->dump_flag = NFP_DUMP_NSP_DIAG; pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl); @@ -548,6 +692,7 @@ err_fw_unload: vfree(pf->dumpspec); err_hwinfo_free: kfree(pf->hwinfo); +err_cpp_free: nfp_cpp_free(pf->cpp); err_disable_msix: destroy_workqueue(pf->wq); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 42211083b51f..595b3dc280e3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -46,10 +46,10 @@ #include <linux/mutex.h> #include <linux/pci.h> #include <linux/workqueue.h> +#include <net/devlink.h> struct dentry; struct device; -struct devlink_ops; struct pci_dev; struct nfp_cpp; @@ -60,7 +60,9 @@ struct nfp_mip; struct nfp_net; struct nfp_nsp_identify; struct nfp_port; +struct nfp_rtsym; struct nfp_rtsym_table; +struct nfp_shared_buf; /** * struct nfp_dumpspec - NFP FW dump specification structure @@ -87,6 +89,7 @@ struct nfp_dumpspec { * @vf_cfg_mem: Pointer to mapped VF configuration area * @vfcfg_tbl2_area: Pointer to the CPP area for the VF config table * @vfcfg_tbl2: Pointer to mapped VF config table + * @mbox: RTSym of per-PCI PF mailbox (under devlink lock) * @irq_entries: Array of MSI-X entries for all vNICs * @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit) * @num_vfs: Number of SR-IOV VFs enabled @@ -108,6 +111,8 @@ struct nfp_dumpspec { * @ports: Linked list of port structures (struct nfp_port) * @wq: Workqueue for running works which need to grab @lock * @port_refresh_work: Work entry for taking netdevs out + * @shared_bufs: Array of shared buffer structures if FW has any SBs + * @num_shared_bufs: Number of elements in @shared_bufs * @lock: Protects all fields which may change after probe */ struct nfp_pf { @@ -127,6 +132,8 @@ struct nfp_pf { struct nfp_cpp_area *vfcfg_tbl2_area; u8 __iomem *vfcfg_tbl2; + const struct nfp_rtsym *mbox; + struct msix_entry *irq_entries; unsigned int limit_vfs; @@ -158,6 +165,9 @@ struct nfp_pf { struct workqueue_struct *wq; struct work_struct port_refresh_work; + struct nfp_shared_buf *shared_bufs; + unsigned int num_shared_bufs; + struct mutex lock; }; @@ -177,6 +187,14 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev, bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb); +int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, + unsigned int default_val); +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area); +int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, + void *out_data, u64 out_length); + enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, }; @@ -188,4 +206,11 @@ s64 nfp_net_dump_calculate_size(struct nfp_pf *pf, struct nfp_dumpspec *spec, int nfp_net_dump_populate_buffer(struct nfp_pf *pf, struct nfp_dumpspec *spec, struct ethtool_dump *dump_param, void *dest); +int nfp_shared_buf_register(struct nfp_pf *pf); +void nfp_shared_buf_unregister(struct nfp_pf *pf); 
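+ +/* Illustrative sketch (hypothetical, not part of this patch): a minimal + * caller of the pool helpers declared below, using only their signatures; + * the helper already converts @size to bytes via the SB's pool_size_unit. + * + * static int nfp_example_pool_size(struct nfp_pf *pf, unsigned int sb, + * u16 pool) + * { + * struct devlink_sb_pool_info info; + * int err; + * + * err = nfp_shared_buf_pool_get(pf, sb, pool, &info); + * if (err) + * return err; + * return info.size; + * } + */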
+int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index bd7d8ae31e17..57cb035dcc6d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -545,6 +545,7 @@ struct nfp_net_dp { /** * struct nfp_net - NFP network device structure * @dp: Datapath structure + * @id: vNIC id within the PF (0 for VFs) * @fw_ver: Firmware version * @cap: Capabilities advertised by the Firmware * @max_mtu: Maximum supported MTU advertised by the Firmware @@ -597,6 +598,8 @@ struct nfp_net { struct nfp_net_fw_version fw_ver; + u32 id; + u32 cap; u32 max_mtu; @@ -909,7 +912,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new, void nfp_net_debugfs_create(void); void nfp_net_debugfs_destroy(void); struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev); -void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id); +void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir); void nfp_net_debugfs_dir_clean(struct dentry **dir); #else static inline void nfp_net_debugfs_create(void) @@ -926,7 +929,7 @@ static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) } static inline void -nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) +nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1eb6549f2a54..75110c8d6a90 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1722,7 +1722,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) act = bpf_prog_run_xdp(xdp_prog, &xdp); - pkt_len -= xdp.data - orig_data; + pkt_len = xdp.data_end - xdp.data; pkt_off += xdp.data - orig_data; switch (act) { @@ -3277,6 +3277,25 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int +nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) +{ + struct nfp_net *nn = netdev_priv(netdev); + int n; + + if (nn->port) + return nfp_port_get_phys_port_name(netdev, name, len); + + if (nn->dp.is_vf) + return -EOPNOTSUPP; + + n = snprintf(name, len, "n%d", nn->id); + if (n >= len) + return -EINVAL; + + return 0; +} + /** * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW * @nn: NFP Net device to reconfigure @@ -3475,7 +3494,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = nfp_net_set_mac_address, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_get_phys_port_name = nfp_port_get_phys_port_name, + .ndo_get_phys_port_name = nfp_net_get_phys_port_name, .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_bpf = nfp_net_xdp, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 67cdd8330c59..099b63d67451 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -201,7 +201,7 @@ static const struct 
file_operations nfp_xdp_q_fops = { .llseek = seq_lseek }; -void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) +void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { struct dentry *queues, *tx, *rx, *xdp; char name[20]; @@ -211,7 +211,7 @@ void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir, int id) return; if (nfp_net_is_data_vnic(nn)) - sprintf(name, "vnic%d", id); + sprintf(name, "vnic%d", nn->id); else strcpy(name, "ctrl-vnic"); nn->debugfs_dir = debugfs_create_dir(name, ddir); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index c9016419bfa0..26d1cc4e2906 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -437,7 +437,7 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } -static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) { va_list args; @@ -637,6 +637,7 @@ static void nfp_net_get_strings(struct net_device *netdev, nn->dp.num_tx_rings, false); data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(nn->port, data); break; } } @@ -651,6 +652,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->dp.num_rx_rings, nn->dp.num_tx_rings); data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(nn->port, data); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -662,7 +664,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) return nfp_vnic_get_sw_stats_count(netdev) + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, nn->dp.num_tx_rings) + - nfp_mac_get_stats_count(netdev); + nfp_mac_get_stats_count(netdev) + + nfp_app_port_get_stats_count(nn->port); default: return -EOPNOTSUPP; } @@ -679,6 +682,7 @@ static void nfp_port_get_strings(struct net_device *netdev, data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); else data = nfp_mac_get_stats_strings(netdev, data); + data = nfp_app_port_get_stats_strings(port, data); break; } } @@ -693,6 +697,7 @@ nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); else data = nfp_mac_get_stats(netdev, data); + data = nfp_app_port_get_stats(port, data); } static int nfp_port_get_sset_count(struct net_device *netdev, int sset) @@ -706,6 +711,7 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) count = nfp_vnic_get_hw_stats_count(0, 0); else count = nfp_mac_get_stats_count(netdev); + count += nfp_app_port_get_stats_count(port); return count; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 45cd2092e498..28516eecccc8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -101,48 +101,15 @@ nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index) return NULL; } -static int -nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, - unsigned int default_val) -{ - char name[256]; - int err = 0; - u64 val; - - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); - - val = nfp_rtsym_read_le(pf->rtbl, name, &err); - if (err) { - if (err 
== -ENOENT) - return default_val; - nfp_err(pf->cpp, "Unable to read symbol %s\n", name); - return err; - } - - return val; -} - static int nfp_net_pf_get_num_ports(struct nfp_pf *pf) { - return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); + return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1); } static int nfp_net_pf_get_app_id(struct nfp_pf *pf) { - return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", - NFP_APP_CORE_NIC); -} - -static u8 __iomem * -nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) -{ - char pf_symbol[256]; - - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); - - return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id", + NFP_APP_CORE_NIC); } static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn) @@ -211,11 +178,13 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; + nn->id = id; + err = nfp_net_init(nn); if (err) return err; - nfp_net_debugfs_vnic_add(nn, pf->ddir, id); + nfp_net_debugfs_vnic_add(nn, pf->ddir); if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); @@ -379,9 +348,8 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) if (!nfp_app_needs_ctrl_vnic(pf->app)) return 0; - ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar", - NFP_PF_CSR_SLICE_SIZE, - &pf->ctrl_vnic_bar); + ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar", + NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar); if (IS_ERR(ctrl_bar)) { nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n"); err = PTR_ERR(ctrl_bar); @@ -507,8 +475,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; - mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", - min_size, &pf->data_vnic_bar); + mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", + min_size, &pf->data_vnic_bar); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); return PTR_ERR(mem); @@ -528,10 +496,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg", - "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * - pf->limit_vfs, &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -541,9 +508,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; - pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2", - "_pf%d_net_vf_cfg2", - min_size, &pf->vfcfg_tbl2_area); + pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", + "_pf%d_net_vf_cfg2", + min_size, &pf->vfcfg_tbl2_area); if (IS_ERR(pf->vfcfg_tbl2)) { if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) { err = PTR_ERR(pf->vfcfg_tbl2); @@ -763,6 +730,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_app_clean; + err = nfp_shared_buf_register(pf); + if (err) + goto err_devlink_unreg; + mutex_lock(&pf->lock); pf->ddir = nfp_net_debugfs_device_add(pf->pdev); @@ -796,6 +767,8 @@ err_free_vnics: err_clean_ddir: nfp_net_debugfs_dir_clean(&pf->ddir); mutex_unlock(&pf->lock); + nfp_shared_buf_unregister(pf); +err_devlink_unreg: 
cancel_work_sync(&pf->port_refresh_work); devlink_unregister(devlink); err_app_clean: @@ -823,6 +796,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) mutex_unlock(&pf->lock); + nfp_shared_buf_unregister(pf); devlink_unregister(priv_to_devlink(pf)); nfp_net_pf_free_irqs(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 0cd077addb26..d7b712f6362f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -277,6 +277,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, .ndo_set_features = nfp_port_set_features, + .ndo_set_mac_address = eth_mac_addr, }; static void nfp_repr_clean(struct nfp_repr *repr) @@ -348,18 +349,24 @@ err_clean: return err; } -static void nfp_repr_free(struct nfp_repr *repr) +static void __nfp_repr_free(struct nfp_repr *repr) { free_percpu(repr->stats); free_netdev(repr->netdev); } -struct net_device *nfp_repr_alloc(struct nfp_app *app) +void nfp_repr_free(struct net_device *netdev) +{ + __nfp_repr_free(netdev_priv(netdev)); +} + +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs) { struct net_device *netdev; struct nfp_repr *repr; - netdev = alloc_etherdev(sizeof(*repr)); + netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs); if (!netdev) return NULL; @@ -380,12 +387,12 @@ err_free_netdev: return NULL; } -static void nfp_repr_clean_and_free(struct nfp_repr *repr) +void nfp_repr_clean_and_free(struct nfp_repr *repr) { nfp_info(repr->app->cpp, "Destroying Representor(%s)\n", repr->netdev->name); nfp_repr_clean(repr); - nfp_repr_free(repr); + __nfp_repr_free(repr); } void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index a621e8ff528e..1bf2b18109ab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -76,6 +76,7 @@ struct nfp_repr_pcpu_stats { * @port: Port of representor * @app: APP handle * @stats: Statistic of packets hitting CPU + * @app_priv: Pointer for APP data */ struct nfp_repr { struct net_device *netdev; @@ -83,6 +84,7 @@ struct nfp_repr { struct nfp_port *port; struct nfp_app *app; struct nfp_repr_pcpu_stats __percpu *stats; + void *app_priv; }; /** @@ -123,11 +125,18 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); -struct net_device *nfp_repr_alloc(struct nfp_app *app); +void nfp_repr_free(struct net_device *netdev); +struct net_device * +nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs); +void nfp_repr_clean_and_free(struct nfp_repr *repr); void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs); void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type); struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs); int nfp_reprs_resync_phys_ports(struct nfp_app *app); +static inline struct net_device *nfp_repr_alloc(struct nfp_app *app) +{ + return nfp_repr_alloc_mqs(app, 1, 1); +} #endif /* NFP_NET_REPR_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index 
b802a1d55449..68928c86b698 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -283,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, nfp_net_info(nn); vf->ddir = nfp_net_debugfs_device_add(pdev); - nfp_net_debugfs_vnic_add(nn, vf->ddir, 0); + nfp_net_debugfs_vnic_add(nn, vf->ddir); return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 7bd8be5c833b..9c1298114c70 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -181,7 +181,11 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len) eth_port->label_subport); break; case NFP_PORT_PF_PORT: - n = snprintf(name, len, "pf%d", port->pf_id); + if (!port->pf_split) + n = snprintf(name, len, "pf%d", port->pf_id); + else + n = snprintf(name, len, "pf%ds%d", port->pf_id, + port->pf_split_id); break; case NFP_PORT_VF_PORT: n = snprintf(name, len, "pf%dvf%d", port->pf_id, port->vf_id); @@ -218,6 +222,8 @@ int nfp_port_configure(struct net_device *netdev, bool configed) eth_port = __nfp_port_get_eth_port(port); if (!eth_port) return 0; + if (port->eth_forced) + return 0; err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed); return err < 0 && err != -EOPNOTSUPP ? err : 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index fa7e669a969c..51f10ae2d53e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -77,10 +77,13 @@ enum nfp_port_flags { * @app: backpointer to the app structure * @dl_port: devlink port structure * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme + * @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id + * @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC + * @pf_split_id:for %NFP_PORT_PF_PORT ID of PCI PF vNIC (valid if @pf_split) * @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory * @port_list: entry on pf's list of ports */ @@ -99,6 +102,7 @@ struct nfp_port { /* NFP_PORT_PHYS_PORT */ struct { unsigned int eth_id; + bool eth_forced; struct nfp_eth_table_port *eth_port; u8 __iomem *eth_stats; }; @@ -106,6 +110,8 @@ struct nfp_port { struct { unsigned int pf_id; unsigned int vf_id; + bool pf_split; + unsigned int pf_split_id; u8 __iomem *vnic; }; }; @@ -116,6 +122,8 @@ struct nfp_port { extern const struct ethtool_ops nfp_port_ethtool_ops; extern const struct switchdev_ops nfp_port_switchdev_ops; +__printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); + int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c new file mode 100644 index 000000000000..0ecd83705368 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +/* + * Copyright (C) 2018 Netronome Systems, Inc. 
+ * + * This software is dual licensed under the GNU General Public License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <net/devlink.h> + +#include "nfpcore/nfp_cpp.h" +#include "nfpcore/nfp_nffw.h" +#include "nfp_abi.h" +#include "nfp_app.h" +#include "nfp_main.h" + +static u32 nfp_shared_buf_pool_unit(struct nfp_pf *pf, unsigned int sb) +{ + __le32 sb_id = cpu_to_le32(sb); + unsigned int i; + + for (i = 0; i < pf->num_shared_bufs; i++) + if (pf->shared_bufs[i].id == sb_id) + return le32_to_cpu(pf->shared_bufs[i].pool_size_unit); + + WARN_ON_ONCE(1); + return 0; +} + +int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct nfp_shared_buf_pool_info_get get_data; + struct nfp_shared_buf_pool_id id = { + .shared_buf = cpu_to_le32(sb), + .pool = cpu_to_le32(pool_index), + }; + unsigned int unit_size; + int n; + + unit_size = nfp_shared_buf_pool_unit(pf, sb); + if (!unit_size) + return -EINVAL; + + n = nfp_mbox_cmd(pf, NFP_MBOX_POOL_GET, &id, sizeof(id), + &get_data, sizeof(get_data)); + if (n < 0) + return n; + if (n < sizeof(get_data)) + return -EIO; + + pool_info->pool_type = le32_to_cpu(get_data.pool_type); + pool_info->threshold_type = le32_to_cpu(get_data.threshold_type); + pool_info->size = le32_to_cpu(get_data.size) * unit_size; + + return 0; +} + +int nfp_shared_buf_pool_set(struct nfp_pf *pf, unsigned int sb, + u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct nfp_shared_buf_pool_info_set set_data = { + .id = { + .shared_buf = cpu_to_le32(sb), + .pool = cpu_to_le32(pool_index), + }, + .threshold_type = cpu_to_le32(threshold_type), + }; + unsigned int unit_size; + + unit_size = nfp_shared_buf_pool_unit(pf, sb); + if (!unit_size || size % unit_size) + return -EINVAL; + set_data.size = cpu_to_le32(size / unit_size); + + return nfp_mbox_cmd(pf, NFP_MBOX_POOL_SET, &set_data, sizeof(set_data), + NULL, 0); +} + +int nfp_shared_buf_register(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + unsigned int i, num_entries, entry_sz; + struct nfp_cpp_area *sb_desc_area; + u8 __iomem *sb_desc; + int n, err; + + if (!pf->mbox) + return 0; + + n = nfp_pf_rtsym_read_optional(pf,
NFP_SHARED_BUF_COUNT_SYM_NAME, 0); + if (n <= 0) + return n; + num_entries = n; + + sb_desc = nfp_pf_map_rtsym(pf, "sb_tbl", NFP_SHARED_BUF_TABLE_SYM_NAME, + num_entries * sizeof(pf->shared_bufs[0]), + &sb_desc_area); + if (IS_ERR(sb_desc)) + return PTR_ERR(sb_desc); + + entry_sz = nfp_cpp_area_size(sb_desc_area) / num_entries; + + pf->shared_bufs = kmalloc_array(num_entries, sizeof(pf->shared_bufs[0]), + GFP_KERNEL); + if (!pf->shared_bufs) { + err = -ENOMEM; + goto err_release_area; + } + + for (i = 0; i < num_entries; i++) { + struct nfp_shared_buf *sb = &pf->shared_bufs[i]; + + /* Entries may be larger in future FW */ + memcpy_fromio(sb, sb_desc + i * entry_sz, sizeof(*sb)); + + err = devlink_sb_register(devlink, + le32_to_cpu(sb->id), + le32_to_cpu(sb->size), + le16_to_cpu(sb->ingress_pools_count), + le16_to_cpu(sb->egress_pools_count), + le16_to_cpu(sb->ingress_tc_count), + le16_to_cpu(sb->egress_tc_count)); + if (err) + goto err_unreg_prev; + } + pf->num_shared_bufs = num_entries; + + nfp_cpp_area_release_free(sb_desc_area); + + return 0; + +err_unreg_prev: + while (i--) + devlink_sb_unregister(devlink, + le32_to_cpu(pf->shared_bufs[i].id)); + kfree(pf->shared_bufs); +err_release_area: + nfp_cpp_area_release_free(sb_desc_area); + return err; +} + +void nfp_shared_buf_unregister(struct nfp_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + unsigned int i; + + for (i = 0; i < pf->num_shared_bufs; i++) + devlink_sb_unregister(devlink, + le32_to_cpu(pf->shared_bufs[i].id)); + kfree(pf->shared_bufs); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index ced62d112aa2..f44d0a857314 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -94,6 +94,8 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +int nfp_resource_table_init(struct nfp_cpp *cpp); + struct nfp_resource * nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index cd678323bacb..749655c329b2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -933,7 +933,6 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, u32 *wrptr32 = kernel_vaddr; const u32 __iomem *rdptr32; int n, width; - bool is_64; priv = nfp_cpp_area_priv(area); rdptr64 = priv->iomem + offset; @@ -943,10 +942,15 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, return -EFAULT; width = priv->width.read; - if (width <= 0) return -EINVAL; + /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + /* Unaligned? 
Translate to an explicit access */ if ((priv->offset + offset) & (width - 1)) return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area), @@ -956,36 +960,29 @@ static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, priv->offset + offset, kernel_vaddr, length, width); - is_64 = width == TARGET_WIDTH_64; - - /* MU reads via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) - is_64 = false; + if (WARN_ON(!priv->bar)) + return -EFAULT; - if (is_64) { - if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) - return -EINVAL; - } else { + switch (width) { + case TARGET_WIDTH_32: if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) return -EINVAL; - } - if (WARN_ON(!priv->bar)) - return -EFAULT; + for (n = 0; n < length; n += sizeof(u32)) + *wrptr32++ = __raw_readl(rdptr32++); + return n; +#ifdef __raw_readq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; - if (is_64) -#ifndef __raw_readq - return -EINVAL; -#else for (n = 0; n < length; n += sizeof(u64)) *wrptr64++ = __raw_readq(rdptr64++); + return n; #endif - else - for (n = 0; n < length; n += sizeof(u32)) - *wrptr32++ = __raw_readl(rdptr32++); - - return n; + default: + return -EINVAL; + } } static int @@ -999,7 +996,6 @@ nfp6000_area_write(struct nfp_cpp_area *area, struct nfp6000_area_priv *priv; u32 __iomem *wrptr32; int n, width; - bool is_64; priv = nfp_cpp_area_priv(area); wrptr64 = priv->iomem + offset; @@ -1009,10 +1005,15 @@ nfp6000_area_write(struct nfp_cpp_area *area, return -EFAULT; width = priv->width.write; - if (width <= 0) return -EINVAL; + /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + /* Unaligned? Translate to an explicit access */ if ((priv->offset + offset) & (width - 1)) return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area), @@ -1022,40 +1023,33 @@ nfp6000_area_write(struct nfp_cpp_area *area, priv->offset + offset, kernel_vaddr, length, width); - is_64 = width == TARGET_WIDTH_64; - - /* MU writes via a PCIe2CPP BAR supports 32bit (and other) lengths */ - if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && - priv->action == NFP_CPP_ACTION_RW) - is_64 = false; + if (WARN_ON(!priv->bar)) + return -EFAULT; - if (is_64) { - if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) - return -EINVAL; - } else { + switch (width) { + case TARGET_WIDTH_32: if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) return -EINVAL; - } - if (WARN_ON(!priv->bar)) - return -EFAULT; + for (n = 0; n < length; n += sizeof(u32)) { + __raw_writel(*rdptr32++, wrptr32++); + wmb(); + } + return n; +#ifdef __raw_writeq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; - if (is_64) -#ifndef __raw_writeq - return -EINVAL; -#else for (n = 0; n < length; n += sizeof(u64)) { __raw_writeq(*rdptr64++, wrptr64++); wmb(); } + return n; #endif - else - for (n = 0; n < length; n += sizeof(u32)) { - __raw_writel(*rdptr32++, wrptr32++); - wmb(); - } - - return n; + default: + return -EINVAL; + } } struct nfp6000_explicit_priv { @@ -1330,6 +1324,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) /* Finished with card initialization. 
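The reworked nfp6000 area read/write paths above pick the access width per transfer: a 64-bit MU window falls back to 32-bit accesses whenever the offset or length sits on a 4-byte (but not 8-byte) boundary. A self-contained userspace model of that selection rule; the TARGET_WIDTH_* values and the phase test mirror the hunk, but nothing here is driver code:

#include <stdio.h>

#define TARGET_WIDTH_32 4
#define TARGET_WIDTH_64 8

/* Width fallback: a 64-bit-capable MU RW window still has to use
 * 32-bit accesses when either the offset or the length has a 4-byte
 * phase, mirroring "offset % sizeof(u64) == 4 || length % sizeof(u64)
 * == 4" in the patch.
 */
static int pick_width(int is_mu_rw, unsigned long long offset,
                      unsigned long long length, int bar_width)
{
    if (is_mu_rw &&
        (offset % sizeof(unsigned long long) == 4 ||
         length % sizeof(unsigned long long) == 4))
        return TARGET_WIDTH_32;
    return bar_width;
}

int main(void)
{
    printf("aligned 64-bit read: width %d\n",
           pick_width(1, 0x1000, 64, TARGET_WIDTH_64));
    printf("offset phase 4:      width %d\n",
           pick_width(1, 0x1004, 64, TARGET_WIDTH_64));
    printf("length phase 4:      width %d\n",
           pick_width(1, 0x1000, 36, TARGET_WIDTH_64));
    return 0;
}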
*/ dev_info(&pdev->dev, "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + pcie_print_link_status(pdev); nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); if (!nfp) { diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index c8f2c064cce3..b0da3d436850 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -87,6 +87,11 @@ struct resource; #define NFP_CPP_TARGET_ID_MASK 0x1f +#define NFP_CPP_ATOMIC_RD(target, island) \ + NFP_CPP_ISLAND_ID((target), 3, 0, (island)) +#define NFP_CPP_ATOMIC_WR(target, island) \ + NFP_CPP_ISLAND_ID((target), 4, 0, (island)) + /** * NFP_CPP_ID() - pack target, token, and action into a CPP ID. * @target: NFP CPP target id @@ -295,6 +300,8 @@ void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address); /** * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index cb28ac03e4ca..c88bf673cb76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -59,6 +59,11 @@ static u32 nfp_mutex_unlocked(u16 interface) return (u32)interface << 16 | 0x0000; } +static u32 nfp_mutex_owner(u32 val) +{ + return val >> 16; +} + static bool nfp_mutex_is_locked(u32 val) { return (val & 0xffff) == 0x000f; @@ -351,3 +356,43 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL; } + +/** + * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * + * Release lock if held by local system. Extreme care is advised, call only + * when no local lock users can exist. 
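The reclaim helper documented here leans on the mutex word layout used throughout nfp_mutex.c: the owner interface ID sits in bits 31:16 and the lock state in bits 15:0 (0x000f when locked). A minimal standalone model of those checks, with a made-up interface ID; illustrative only, not the driver's I/O path:

#include <stdint.h>
#include <stdio.h>

/* Mutex word layout modelled on the helpers above: bits 31:16 hold the
 * owner interface ID, bits 15:0 hold the state (0x000f == locked).
 */
static uint32_t mutex_owner(uint32_t val)
{
    return val >> 16;
}

static int mutex_is_locked(uint32_t val)
{
    return (val & 0xffff) == 0x000f;
}

static uint32_t mutex_unlocked(uint16_t interface)
{
    return (uint32_t)interface << 16 | 0x0000;
}

int main(void)
{
    uint16_t us = 0x1234;
    uint32_t stale = (uint32_t)us << 16 | 0x000f;  /* locked by us */

    /* Reclaim fires only for a word that is locked AND owned by us */
    if (mutex_is_locked(stale) && mutex_owner(stale) == us)
        printf("bust lock: write 0x%08x\n", mutex_unlocked(us));
    return 0;
}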
+ * + * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex + */ +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + /* Check lock */ + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; + + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; + + /* Bust the lock */ + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + return 1; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index c9724fb7ea4b..df599d5b6bb3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -100,6 +100,8 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error); +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value); u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c index 7e14725055c7..2dd89dba9311 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -338,3 +338,62 @@ u64 nfp_resource_size(struct nfp_resource *res) { return res->size; } + +/** + * nfp_resource_table_init() - Run initial checks on the resource table + * @cpp: NFP CPP handle + * + * Start-of-day init procedure for resource table. Must be called before + * any local resource table users may exist. 
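nfp_resource_table_init() below sweeps the mutex of every table entry, where entry i's lock lives at NFP_RESOURCE_TBL_BASE + sizeof(struct nfp_resource_entry) * i. A sketch of that address walk; the base address and entry size are hypothetical stand-ins, since the real constants live in headers not shown in this hunk:

#include <stdio.h>

/* Hypothetical stand-ins: the real NFP_RESOURCE_TBL_BASE and
 * sizeof(struct nfp_resource_entry) come from driver headers.
 */
#define RESOURCE_TBL_BASE   0x8100000000ULL
#define RESOURCE_ENTRY_SIZE 32ULL

int main(void)
{
    int i;

    /* Entry 0 is the table's own dev_mutex, so the sweep starts at 1,
     * just as nfp_resource_table_init() does below.
     */
    for (i = 1; i < 4; i++)
        printf("resource %d mutex at 0x%llx\n", i,
               RESOURCE_TBL_BASE + RESOURCE_ENTRY_SIZE * i);
    return 0;
}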
+ * + * Return: 0 on success, -errno on failure + */ +int nfp_resource_table_init(struct nfp_cpp *cpp) +{ + struct nfp_cpp_mutex *dev_mutex; + int i, err; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE); + if (err < 0) { + nfp_err(cpp, "Error: failed to reclaim resource table mutex\n"); + return err; + } + if (err) + nfp_warn(cpp, "Warning: busted main resource table mutex\n"); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) + return -ENOMEM; + + if (nfp_cpp_mutex_lock(dev_mutex)) { + nfp_err(cpp, "Error: failed to claim resource table mutex\n"); + nfp_cpp_mutex_free(dev_mutex); + return -EINVAL; + } + + /* Resource 0 is the dev_mutex, start from 1 */ + for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr); + if (err < 0) { + nfp_err(cpp, + "Error: failed to reclaim resource %d mutex\n", + i); + goto err_unlock; + } + if (err) + nfp_warn(cpp, "Warning: busted resource %d mutex\n", i); + } + + err = 0; +err_unlock: + nfp_cpp_mutex_unlock(dev_mutex); + nfp_cpp_mutex_free(dev_mutex); + + return err; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 46107aefad1c..9e34216578da 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -286,6 +286,49 @@ exit: return val; } +/** + * nfp_rtsym_write_le() - Write an unsigned scalar value to a symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @value: Value to write + * + * Lookup a symbol and write a value to it. Symbol can be 4 or 8 bytes in size. + * If 4 bytes then the lower 32-bits of 'value' are used. Value will be + * written as simple little-endian unsigned value. + * + * Return: 0 on success or error code. 
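A standalone model of the size dispatch nfp_rtsym_write_le() performs below: a 4-byte symbol takes the low 32 bits of the value, an 8-byte symbol the whole value, both written little-endian. The byte buffer stands in for the device write, and the memcpy()s assume a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 4-vs-8-byte scalar dispatch: anything else is rejected, mirroring
 * the "unsupported or non-scalar size" error in the patch.
 */
static int write_le_scalar(uint8_t *dst, unsigned int sym_size,
                           uint64_t value)
{
    uint32_t v32 = (uint32_t)value;

    switch (sym_size) {
    case 4:
        memcpy(dst, &v32, sizeof(v32));    /* assumes LE host */
        return 0;
    case 8:
        memcpy(dst, &value, sizeof(value));
        return 0;
    default:
        return -1;    /* unsupported or non-scalar size */
    }
}

int main(void)
{
    uint8_t buf[8] = { 0 };
    unsigned int i;

    write_le_scalar(buf, 4, 0x1122334455667788ULL);
    for (i = 0; i < 4; i++)
        printf("%02x ", buf[i]);    /* 88 77 66 55 */
    printf("\n");
    return 0;
}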
+ */ +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value) +{ + const struct nfp_rtsym *sym; + int err; + u32 id; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) + return -ENOENT; + + id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); + + switch (sym->size) { + case 4: + err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value); + break; + case 8: + err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value); + break; + default: + nfp_err(rtbl->cpp, + "rtsym '%s' unsupported or non-scalar size: %lld\n", + name, sym->size); + err = -EINVAL; + break; + } + + return err; +} + u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 6cec2a6a3dcc..7503aa222392 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -146,8 +146,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter) if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) { memcpy(adapter->mdump.md_template, addr, size); } else { - dev_err(&adapter->pdev->dev, "Failed to get minidump template, " - "err_code : %d, requested_size : %d, actual_size : %d\n ", + dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n", cmd.rsp.cmd, size, cmd.rsp.arg2); } pci_free_consistent(adapter->pdev, size, addr, md_template_addr); @@ -180,8 +179,7 @@ netxen_setup_minidump(struct netxen_adapter *adapter) if ((err == NX_RCODE_CMD_INVALID) || (err == NX_RCODE_CMD_NOT_IMPL)) { dev_info(&adapter->pdev->dev, - "Flashed firmware version does not support minidump, " - "minimum version required is [ %u.%u.%u ].\n ", + "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n", NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR, NX_MD_SUPPORT_SUBVERSION); } diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index c70cf2ad81c0..a0acb94d65f0 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o + qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e07460a68d30..00db3401b898 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -92,6 +92,8 @@ struct qed_eth_cb_ops; struct qed_dev_info; union qed_mcp_protocol_stats; enum qed_mcp_protocol_type; +enum qed_mfw_tlv_type; +union qed_mfw_tlv_data; /* helpers */ #define QED_MFW_GET_FIELD(name, field) \ @@ -439,6 +441,59 @@ struct qed_fw_data { u32 init_ops_size; }; +enum qed_mf_mode_bit { + /* Supports PF-classification based on tag */ + QED_MF_OVLAN_CLSS, + + /* Supports PF-classification based on MAC */ + QED_MF_LLH_MAC_CLSS, + + /* Supports PF-classification based on protocol type */ + QED_MF_LLH_PROTO_CLSS, + + /* Requires a default PF to be set */ + QED_MF_NEED_DEF_PF, + + 
/* Allow LL2 to multicast/broadcast */ + QED_MF_LL2_NON_UNICAST, + + /* Allow Cross-PF [& child VFs] Tx-switching */ + QED_MF_INTER_PF_SWITCH, + + /* Unified Fabric Port support enabled */ + QED_MF_UFP_SPECIFIC, + + /* Disable Accelerated Receive Flow Steering (aRFS) */ + QED_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + QED_MF_8021Q_TAGGING, + + /* Use stag for steering */ + QED_MF_8021AD_TAGGING, + + /* Allow DSCP to TC mapping */ + QED_MF_DSCP_TO_TC_MAP, +}; + +enum qed_ufp_mode { + QED_UFP_MODE_ETS, + QED_UFP_MODE_VNIC_BW, + QED_UFP_MODE_UNKNOWN +}; + +enum qed_ufp_pri_type { + QED_UFP_PRI_OS, + QED_UFP_PRI_VNIC, + QED_UFP_PRI_UNKNOWN +}; + +struct qed_ufp_info { + enum qed_ufp_pri_type pri_type; + enum qed_ufp_mode mode; + u8 tc; +}; + enum BAR_ID { BAR_ID_0, /* used for GRC */ BAR_ID_1 /* Used for doorbells */ @@ -460,6 +515,10 @@ struct qed_simd_fp_handler { void (*func)(void *); }; +enum qed_slowpath_wq_flag { + QED_SLOWPATH_MFW_TLV_REQ, +}; + struct qed_hwfn { struct qed_dev *cdev; u8 my_id; /* ID inside the PF */ @@ -547,6 +606,8 @@ struct qed_hwfn { struct qed_dcbx_info *p_dcbx_info; + struct qed_ufp_info ufp_info; + struct qed_dmae_info dmae_info; /* QM init */ @@ -587,6 +648,9 @@ struct qed_hwfn { #endif struct z_stream_s *stream; + struct workqueue_struct *slowpath_wq; + struct delayed_work slowpath_task; + unsigned long slowpath_task_flags; }; struct pci_params { @@ -669,10 +733,8 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum qed_mf_mode mf_mode; -#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) + + unsigned long mf_bits; int pcie_width; int pcie_speed; @@ -853,5 +915,9 @@ void qed_get_protocol_stats(struct qed_dev *cdev, union qed_mcp_protocol_stats *stats); int qed_slowpath_irq_req(struct qed_hwfn *hwfn); void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_tlv_req(struct qed_hwfn *hwfn); +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, + enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_data); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 820b226d6ff8..b5b5ff725426 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -47,6 +47,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_init_ops.h" +#include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sriov.h" @@ -426,7 +427,7 @@ static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) p_mgr->srq_count = num_srqs; } -static u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; @@ -936,14 +937,13 @@ static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) u32 size = min_t(u32, total_size, psz); void **p_virt = &p_mngr->t2[i].p_virt; - *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, - &p_mngr->t2[i].p_phys, GFP_KERNEL); + *p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + size, &p_mngr->t2[i].p_phys, + GFP_KERNEL); + if (!p_mngr->t2[i].p_virt) { rc = -ENOMEM; goto t2_fail; } - memset(*p_virt, 0, size); p_mngr->t2[i].size = size; total_size -= size; } @@ -2071,7 +2071,7 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, u32 num_cons, num_qps, num_srqs; enum protocol_type proto; - num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { DP_NOTICE(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index a4e95869889f..758a8b4c0de8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -235,6 +235,7 @@ u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, enum protocol_type type); u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, enum protocol_type type); +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn); int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); #define QED_CTX_WORKING_MEM 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 449777f21237..8f31406ec894 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -274,8 +274,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, u32 pri_tc_tbl, int count, u8 dcbx_version) { enum dcbx_protocol_type type; + bool enable, ieee, eth_tlv; u8 tc, priority_map; - bool enable, ieee; u16 protocol_id; int priority; int i; @@ -283,6 +283,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); + eth_tlv = false; /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -304,13 +305,22 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, * indication, but we only got here if there was an * app tlv for the protocol, so dcbx must be enabled. */ - enable = !(type == DCBX_PROTOCOL_ETH); + if (type == DCBX_PROTOCOL_ETH) { + enable = false; + eth_tlv = true; + } else { + enable = true; + } qed_dcbx_update_app_info(p_data, p_hwfn, enable, priority, tc, type); } } + /* If Eth TLV is not detected, use UFP TC as default TC */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; + /* Update ramrod protocol data and hw_info fields * with default info when corresponding APP TLV's are not detected. 
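A compact model of the APP TLV walk just added above: an Ethernet TLV is recorded but marked not-enabled, and when no Ethernet TLV shows up on a UFP-capable function the UFP TC is adopted as the default Ethernet TC. The protocol list and TC values below are illustrative, not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

enum app_proto { PROTO_FCOE, PROTO_ISCSI, PROTO_ETH };

/* Walk the APP TLVs; remember whether an Ethernet TLV was present,
 * then apply the UFP fallback exactly as the hunk above does.
 */
int main(void)
{
    enum app_proto tlvs[] = { PROTO_ISCSI, PROTO_FCOE };
    bool eth_tlv = false, ufp_mode = true;
    int eth_tc = 0, ufp_tc = 3;
    unsigned int i;

    for (i = 0; i < sizeof(tlvs) / sizeof(tlvs[0]); i++)
        if (tlvs[i] == PROTO_ETH)
            eth_tlv = true;

    if (ufp_mode && !eth_tlv)
        eth_tc = ufp_tc;    /* fall back to the UFP TC */

    printf("default eth TC = %d\n", eth_tc);
    return 0;
}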
* The enabled field has a different logic for diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 4926c5532fba..b9ec460dd996 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -183,16 +183,9 @@ enum platform_ids { MAX_PLATFORM_IDS }; -struct chip_platform_defs { - u8 num_ports; - u8 num_pfs; - u8 num_vfs; -}; - /* Chip constant definitions */ struct chip_defs { const char *name; - struct chip_platform_defs per_platform[MAX_PLATFORM_IDS]; }; /* Platform constant definitions */ @@ -317,6 +310,11 @@ struct phy_defs { u32 tbus_data_hi_addr; }; +/* Split type definitions */ +struct split_type_defs { + const char *name; +}; + /******************************** Constants **********************************/ #define MAX_LCIDS 320 @@ -419,6 +417,7 @@ struct phy_defs { #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 +#define BIG_RAM_NAME_LEN 3 #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) @@ -468,21 +467,9 @@ static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} }; /* Chip constant definitions array */ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { - { "bb", - {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "ah", - {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } }, - { "reserved", - {{0, 0, 0}, - {0, 0, 0}, - {0, 0, 0}, - {0, 0, 0} } } + {"bb"}, + {"ah"}, + {"reserved"}, }; /* Storm constant definitions array */ @@ -1587,7 +1574,7 @@ static struct grc_param_defs s_grc_param_defs[] = { {{0, 0, 0}, 0, 1, false, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0, 0}, 0, 1, false, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, false, 0, 0}, /* DBG_GRC_PARAM_DUMP_NIG */ {{1, 1, 1}, 0, 1, false, false, 0, 1}, @@ -1744,6 +1731,23 @@ static struct phy_defs s_phy_defs[] = { PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; +static struct split_type_defs s_split_type_defs[] = { + /* SPLIT_TYPE_NONE */ + {"eng"}, + + /* SPLIT_TYPE_PORT */ + {"port"}, + + /* SPLIT_TYPE_PF */ + {"pf"}, + + /* SPLIT_TYPE_PORT_PF */ + {"port"}, + + /* SPLIT_TYPE_VF */ + {"vf"} +}; + /**************************** Private Functions ******************************/ /* Reads and returns a single dword from the specified unaligned buffer */ @@ -1780,28 +1784,68 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 num_pfs = 0, max_pfs_per_port = 0; if (dev_data->initialized) return DBG_STATUS_OK; + /* Set chip */ if (QED_IS_K2(p_hwfn->cdev)) { dev_data->chip_id = CHIP_K2; dev_data->mode_enable[MODE_K2] = 1; + dev_data->num_vfs = MAX_NUM_VFS_K2; + num_pfs = MAX_NUM_PFS_K2; + max_pfs_per_port = MAX_NUM_PFS_K2 / 2; } else if (QED_IS_BB_B0(p_hwfn->cdev)) { dev_data->chip_id = CHIP_BB; dev_data->mode_enable[MODE_BB] = 1; + dev_data->num_vfs = MAX_NUM_VFS_BB; + num_pfs = MAX_NUM_PFS_BB; + max_pfs_per_port = MAX_NUM_PFS_BB; } else { return DBG_STATUS_UNKNOWN_CHIP; } + /* Set platform */ dev_data->platform_id = PLATFORM_ASIC; dev_data->mode_enable[MODE_ASIC] = 1; + /* Set port mode */ + switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { + case 0: + dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; + break; + case 1: + dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; + break; + case 2: + dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; + break; + } +
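The remainder of qed_dbg_dev_init() just below turns the port-mode register and per-chip maxima into num_ports and num_pfs_per_port. A userspace model of that derivation; the K2-like counts are stand-ins for the MAX_NUM_PFS_*/port macros, not values taken from the patch:

#include <stdio.h>

/* Port/PF bookkeeping modelled on qed_dbg_dev_init(): port mode
 * 0/1/2 maps to 1/2/4 ports (100G BB forces 1), and PFs-per-port is
 * the engine PF count divided by ports, capped by the chip maximum.
 */
static unsigned int min_u32(unsigned int a, unsigned int b)
{
    return a < b ? a : b;
}

int main(void)
{
    unsigned int port_mode = 1;  /* as read from MISC_REG_PORT_MODE */
    int is_100g = 0;
    unsigned int num_pfs = 16, max_pfs_per_port = 8;  /* K2-like */
    unsigned int num_ports, num_pfs_per_port;

    num_ports = is_100g ? 1 : (port_mode == 0 ? 1 :
                               port_mode == 1 ? 2 : 4);
    num_pfs_per_port = min_u32(num_pfs / num_ports, max_pfs_per_port);

    printf("%u port(s), %u PF(s) per port\n",
           num_ports, num_pfs_per_port);
    return 0;
}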
+ /* Set 100G mode */ + if (dev_data->chip_id == CHIP_BB && + qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB) == 2) + dev_data->mode_enable[MODE_100G] = 1; + + /* Set number of ports */ + if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || + dev_data->mode_enable[MODE_100G]) + dev_data->num_ports = 1; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) + dev_data->num_ports = 2; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) + dev_data->num_ports = 4; + + /* Set number of PFs per port */ + dev_data->num_pfs_per_port = min_t(u32, + num_pfs / dev_data->num_ports, + max_pfs_per_port); + /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); dev_data->use_dmae = true; - dev_data->num_regs_read = 0; dev_data->initialized = 1; return DBG_STATUS_OK; @@ -1820,9 +1864,9 @@ static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn, /* Reads the FW info structure for the specified Storm from the chip, * and writes it to the specified fw_info pointer. */ -static void qed_read_fw_info(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 storm_id, struct fw_info *fw_info) +static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 storm_id, struct fw_info *fw_info) { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; @@ -1944,45 +1988,29 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) { - struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; char fw_ver_str[16] = EMPTY_FW_VERSION_STR; char fw_img_str[16] = EMPTY_FW_IMAGE_STR; struct fw_info fw_info = { {0}, {0} }; u32 offset = 0; if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { - /* Read FW image/version from PRAM in a non-reset SEMI */ - bool found = false; - u8 storm_id; - - for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found; - storm_id++) { - struct storm_defs *storm = &s_storm_defs[storm_id]; - - /* Read FW version/image */ - if (dev_data->block_in_reset[storm->block_id]) - continue; - - /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); - - /* Create FW version/image strings */ - if (snprintf(fw_ver_str, sizeof(fw_ver_str), - "%d_%d_%d_%d", fw_info.ver.num.major, - fw_info.ver.num.minor, fw_info.ver.num.rev, - fw_info.ver.num.eng) < 0) - DP_NOTICE(p_hwfn, - "Unexpected debug error: invalid FW version string\n"); - switch (fw_info.ver.image_id) { - case FW_IMG_MAIN: - strcpy(fw_img_str, "main"); - break; - default: - strcpy(fw_img_str, "unknown"); - break; - } - - found = true; + /* Read FW info from chip */ + qed_read_fw_info(p_hwfn, p_ptt, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; } } @@ -2411,20 +2439,21 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, /* Dumps GRC registers section header. Returns the dumped size in dwords. * The following parameters are dumped: - * - count: no. of dumped entries - * - split: split type - * - id: split ID (dumped only if split_id >= 0) + * - count: no. 
of dumped entries + * - split_type: split type + * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) * - param_name: user parameter value (dumped only if param_name != NULL * and param_val != NULL). */ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, bool dump, u32 num_reg_entries, - const char *split_type, - int split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { - u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0); + u8 num_params = 2 + + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (param_name ? 1 : 0); u32 offset = 0; offset += qed_dump_section_hdr(dump_buf + offset, @@ -2432,8 +2461,9 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, offset += qed_dump_num_param(dump_buf + offset, dump, "count", num_reg_entries); offset += qed_dump_str_param(dump_buf + offset, - dump, "split", split_type); - if (split_id >= 0) + dump, "split", + s_split_type_defs[split_type].name); + if (split_type != SPLIT_TYPE_NONE) offset += qed_dump_num_param(dump_buf + offset, dump, "id", split_id); if (param_name && param_val) @@ -2462,9 +2492,12 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, + u8 split_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0; if (!dump) return len; @@ -2480,8 +2513,27 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, dev_data->num_regs_read = 0; } + switch (split_type) { + case SPLIT_TYPE_PORT: + port_id = split_id; + break; + case SPLIT_TYPE_PF: + pf_id = split_id; + break; + case SPLIT_TYPE_PORT_PF: + port_id = split_id / dev_data->num_pfs_per_port; + pf_id = port_id + dev_data->num_ports * + (split_id % dev_data->num_pfs_per_port); + break; + case SPLIT_TYPE_VF: + vf_id = split_id; + break; + default: + break; + } + /* Try reading using DMAE */ - if (dev_data->use_dmae && + if (dev_data->use_dmae && split_type == SPLIT_TYPE_NONE && (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || wide_bus)) { if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), @@ -2493,7 +2545,37 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, "Failed reading from chip using DMAE, using GRC instead\n"); } - /* Read registers */ + /* If not read using DMAE, read using GRC */ + + /* Set pretend */ + if (split_type != dev_data->pretend.split_type || split_id != + dev_data->pretend.split_id) { + switch (split_type) { + case SPLIT_TYPE_PORT: + qed_port_pretend(p_hwfn, p_ptt, port_id); + break; + case SPLIT_TYPE_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + case SPLIT_TYPE_PORT_PF: + fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; + qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); + break; + case SPLIT_TYPE_VF: + fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) | + (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + default: + break; + } + + dev_data->pretend.split_type = (u8)split_type; + dev_data->pretend.split_id = split_id; + } + + /* Read registers using GRC */ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); return len; @@ -2517,7 +2599,8 @@ static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 
*dump_buf, - bool dump, u32 addr, u32 len, bool wide_bus) + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, u8 split_id) { u32 offset = 0; @@ -2525,7 +2608,8 @@ static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + split_type, split_id); return offset; } @@ -2558,7 +2642,8 @@ static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, curr_len, false); + dump, addr, curr_len, false, + SPLIT_TYPE_NONE, 0); reg_offset += curr_len; addr += curr_len; @@ -2580,6 +2665,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, struct dbg_array input_regs_arr, u32 *dump_buf, bool dump, + enum init_split_types split_type, + u8 split_id, bool block_enable[MAX_BLOCK_ID], u32 *num_dumped_reg_entries) { @@ -2627,7 +2714,8 @@ static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, dump, addr, len, - wide_bus); + wide_bus, + split_type, split_id); (*num_dumped_reg_entries)++; } } @@ -2642,19 +2730,28 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, bool block_enable[MAX_BLOCK_ID], - const char *split_type_name, - u32 split_id, + enum init_split_types split_type, + u8 split_id, const char *param_name, const char *param_val) { + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum init_split_types hdr_split_type = split_type; u32 num_dumped_reg_entries, offset; + u8 hdr_split_id = split_id; + + /* In PORT_PF split type, print a port split header */ + if (split_type == SPLIT_TYPE_PORT_PF) { + hdr_split_type = SPLIT_TYPE_PORT; + hdr_split_id = split_id / dev_data->num_pfs_per_port; + } /* Calculate register dump header size (and skip it for now) */ offset = qed_grc_dump_regs_hdr(dump_buf, false, 0, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); /* Dump registers */ offset += qed_grc_dump_regs_entries(p_hwfn, @@ -2662,6 +2759,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, input_regs_arr, dump_buf + offset, dump, + split_type, + split_id, block_enable, &num_dumped_reg_entries); @@ -2670,8 +2769,8 @@ static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, qed_grc_dump_regs_hdr(dump_buf, dump, num_dumped_reg_entries, - split_type_name, - split_id, param_name, param_val); + hdr_split_type, + hdr_split_id, param_name, param_val); return num_dumped_reg_entries > 0 ? 
offset : 0; } @@ -2687,26 +2786,21 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, const char *param_name, const char *param_val) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - struct chip_platform_defs *chip_platform; u32 offset = 0, input_offset = 0; - struct chip_defs *chip; - u8 port_id, pf_id, vf_id; u16 fid; - - chip = &s_chip_defs[dev_data->chip_id]; - chip_platform = &chip->per_platform[dev_data->platform_id]; - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_regs_arr; + enum init_split_types split_type; + u16 split_count = 0; u32 split_data_size; - u8 split_type_id; + u8 split_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -2716,99 +2810,44 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset]; curr_input_regs_arr.size_in_dwords = split_data_size; - switch (split_type_id) { + switch (split_type) { case SPLIT_TYPE_NONE: - offset += qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "eng", - (u32)(-1), - param_name, - param_val); + split_count = 1; break; - case SPLIT_TYPE_PORT: - for (port_id = 0; port_id < chip_platform->num_ports; - port_id++) { - if (dump) - qed_port_pretend(p_hwfn, p_ptt, - port_id); - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "port", port_id, - param_name, - param_val); - } + split_count = dev_data->num_ports; break; - case SPLIT_TYPE_PF: case SPLIT_TYPE_PORT_PF: - for (pf_id = 0; pf_id < chip_platform->num_pfs; - pf_id++) { - u8 pfid_shift = - PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; - - if (dump) { - fid = pf_id << pfid_shift; - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, - p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, - block_enable, - "pf", - pf_id, - param_name, - param_val); - } + split_count = dev_data->num_ports * + dev_data->num_pfs_per_port; break; - case SPLIT_TYPE_VF: - for (vf_id = 0; vf_id < chip_platform->num_vfs; - vf_id++) { - u8 vfvalid_shift = - PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT; - u8 vfid_shift = - PXP_PRETEND_CONCRETE_FID_VFID_SHIFT; - - if (dump) { - fid = BIT(vfvalid_shift) | - (vf_id << vfid_shift); - qed_fid_pretend(p_hwfn, p_ptt, fid); - } - - offset += - qed_grc_dump_split_data(p_hwfn, p_ptt, - curr_input_regs_arr, - dump_buf + offset, - dump, block_enable, - "vf", vf_id, - param_name, - param_val); - } + split_count = dev_data->num_vfs; break; - default: - break; + return 0; } + for (split_id = 0; split_id < split_count; split_id++) + offset += qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + split_type, + split_id, + param_name, + param_val); + input_offset += split_data_size; } - /* Pretend to original PF */ + /* Cancel pretends (pretend to original PF) */ if (dump) { fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT; qed_fid_pretend(p_hwfn, p_ptt, fid); + dev_data->pretend.split_type = SPLIT_TYPE_NONE; + dev_data->pretend.split_id = 0; } return offset; @@ -2824,7 +2863,8 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, 
"eng", -1, NULL, NULL); + false, 0, + SPLIT_TYPE_NONE, 0, NULL, NULL); /* Write reset registers */ for (i = 0; i < MAX_DBG_RESET_REGS; i++) { @@ -2837,14 +2877,15 @@ static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, dump, BYTES_TO_DWORDS (s_reset_regs_defs[i].addr), 1, - false); + false, SPLIT_TYPE_NONE, 0); num_regs++; } /* Write header */ if (dump) qed_grc_dump_regs_hdr(dump_buf, - true, num_regs, "eng", -1, NULL, NULL); + true, num_regs, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2863,7 +2904,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, /* Calculate header size */ offset += qed_grc_dump_regs_hdr(dump_buf, - false, 0, "eng", -1, NULL, NULL); + false, 0, SPLIT_TYPE_NONE, + 0, NULL, NULL); /* Write parity registers */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { @@ -2898,7 +2940,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); addr = GET_FIELD(reg_data->data, DBG_ATTN_REG_STS_ADDRESS); offset += qed_grc_dump_reg_entry(p_hwfn, @@ -2906,7 +2949,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - 1, false); + 1, false, + SPLIT_TYPE_NONE, 0); num_reg_entries += 2; } } @@ -2928,7 +2972,7 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); num_reg_entries++; } @@ -2936,7 +2980,8 @@ static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, if (dump) qed_grc_dump_regs_hdr(dump_buf, true, - num_reg_entries, "eng", -1, NULL, NULL); + num_reg_entries, SPLIT_TYPE_NONE, + 0, NULL, NULL); return offset; } @@ -2949,7 +2994,8 @@ static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, u32 offset = 0, addr; offset += qed_grc_dump_regs_hdr(dump_buf, - dump, 2, "eng", -1, NULL, NULL); + dump, 2, SPLIT_TYPE_NONE, 0, + NULL, NULL); /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be * skipped). 
@@ -3095,7 +3141,8 @@ static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, - dump, addr, len, wide_bus); + dump, addr, len, wide_bus, + SPLIT_TYPE_NONE, 0); return offset; } @@ -3234,12 +3281,12 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; struct dbg_array curr_input_mems_arr; + enum init_split_types split_type; u32 split_data_size; - u8 split_type_id; split_hdr = (const struct dbg_dump_split_hdr *) &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++]; - split_type_id = + split_type = GET_FIELD(split_hdr->hdr, DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); split_data_size = @@ -3249,20 +3296,15 @@ static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, &s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset]; curr_input_mems_arr.size_in_dwords = split_data_size; - switch (split_type_id) { - case SPLIT_TYPE_NONE: + if (split_type == SPLIT_TYPE_NONE) offset += qed_grc_dump_mem_entries(p_hwfn, p_ptt, curr_input_mems_arr, dump_buf + offset, dump); - break; - - default: + else DP_NOTICE(p_hwfn, "Dumping split memories is currently not supported\n"); - break; - } input_offset += split_data_size; } @@ -3622,7 +3664,8 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, dump, addr, num_dwords_to_read, - false); + false, + SPLIT_TYPE_NONE, 0); total_dwords -= num_dwords_to_read; rss_addr++; } @@ -3650,8 +3693,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 : 128; - strscpy(type_name, big_ram->instance_name, sizeof(type_name)); - strscpy(mem_name, big_ram->instance_name, sizeof(mem_name)); + strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); /* Dump memory header */ offset += qed_grc_dump_mem_hdr(p_hwfn, @@ -3681,7 +3724,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, dump, addr, len, - false); + false, SPLIT_TYPE_NONE, 0); } return offset; @@ -3730,7 +3773,8 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, /* Dump required non-MCP registers */ offset += qed_grc_dump_regs_hdr(dump_buf + offset, - dump, 1, "eng", -1, "block", "MCP"); + dump, 1, SPLIT_TYPE_NONE, 0, + "block", "MCP"); addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); offset += qed_grc_dump_reg_entry(p_hwfn, p_ptt, @@ -3738,7 +3782,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, addr, 1, - false); + false, SPLIT_TYPE_NONE, 0); /* Release MCP */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -3922,7 +3966,8 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, dump, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); } /* Disable block's client and debug output */ @@ -3948,28 +3993,15 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; bool parities_masked = false; - u8 i, port_mode = 0; u32 offset = 0; + u8 i; *num_dumped_dwords = 0; + dev_data->num_regs_read = 0; - if (dump) { - /* Find port mode */ - switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) { - case 0: - port_mode = 1; - break; - case 1: - port_mode = 2; - break; - case 2: - port_mode = 4; - break; - } - - /* Update reset state */ + /* Update reset state */ + if (dump) qed_update_blocks_reset_state(p_hwfn, p_ptt); - } /* Dump global params */ offset += qed_dump_common_global_params(p_hwfn, @@ -3988,7 +4020,7 @@ static enum dbg_status 
qed_grc_dump(struct qed_hwfn *p_hwfn, qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NUM_LTIDS)); offset += qed_dump_num_param(dump_buf + offset, - dump, "num-ports", port_mode); + dump, "num-ports", dev_data->num_ports); /* Dump reset registers (dumped before taking blocks out of reset ) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) @@ -4092,10 +4124,10 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, offset += qed_grc_dump_phy(p_hwfn, p_ptt, dump_buf + offset, dump); - /* Dump static debug data */ + /* Dump static debug data (only if not during debug bus recording) */ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_STATIC) && - dev_data->bus.state == DBG_BUS_STATE_IDLE) + (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) offset += qed_grc_dump_static_debug(p_hwfn, p_ptt, dump_buf + offset, dump); @@ -4249,7 +4281,8 @@ static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, - reg->size, wide_bus); + reg->size, wide_bus, + SPLIT_TYPE_NONE, 0); } } @@ -4372,7 +4405,8 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, next_reg_offset, dump, addr, reg->entry_size, - wide_bus); + wide_bus, + SPLIT_TYPE_NONE, 0); } /* Call rule condition function. @@ -4722,7 +4756,8 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, BYTES_TO_DWORDS(trace_data_grc_addr), - trace_data_size_dwords, false); + trace_data_size_dwords, false, + SPLIT_TYPE_NONE, 0); /* Resume MCP (only if halt succeeded) */ if (halted && qed_mcp_resume(p_hwfn, p_ptt)) @@ -4828,7 +4863,8 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4897,7 +4933,8 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, true, addr, len, - true); + true, SPLIT_TYPE_NONE, + 0); fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4955,7 +4992,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, true, addr, override_window_dwords, - true); + true, SPLIT_TYPE_NONE, 0); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: @@ -4997,7 +5034,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; /* Read FW info for the current Storm */ - qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); asserts = &fw_info.fw_asserts_section; @@ -5035,7 +5072,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump, addr, asserts->list_element_dword_size, - false); + false, SPLIT_TYPE_NONE, 0); } /* Dump last section */ @@ -5062,6 +5099,28 @@ enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Skip Storm if it's in reset */ + if (dev_data->block_in_reset[storm->block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); + + return true; + } + + return false; +} + /* Assign default GRC param values */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) { @@ -7778,6 
+7837,57 @@ int qed_dbg_igu_fifo_size(struct qed_dev *cdev) return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); } +int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) +{ + struct qed_nvm_image_att image_att; + int rc; + + *length = 0; + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); + if (rc) + return rc; + + *length = image_att.length; + + return rc; +} + +int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, enum qed_nvm_images image_id) +{ + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 len_rounded, i; + __be32 val; + int rc; + + *num_dumped_bytes = 0; + rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded); + if (rc) + return rc; + + DP_NOTICE(p_hwfn->cdev, + "Collecting a debug feature [\"nvram image %d\"]\n", + image_id); + + len_rounded = roundup(len_rounded, sizeof(u32)); + rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded); + if (rc) + return rc; + + /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ + if (image_id != QED_NVM_IMAGE_NVM_META) + for (i = 0; i < len_rounded; i += 4) { + val = cpu_to_be32(*(u32 *)(buffer + i)); + *(u32 *)(buffer + i) = val; + } + + *num_dumped_bytes = len_rounded; + + return rc; +} + int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) { @@ -7831,6 +7941,9 @@ enum debug_print_features { IGU_FIFO = 6, PHY = 7, FW_ASSERTS = 8, + NVM_CFG1 = 9, + DEFAULT_CFG = 10, + NVM_META = 11, }; static u32 qed_calc_regdump_header(enum debug_print_features feature, @@ -7965,13 +8078,61 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc); } + /* nvm cfg1 */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_CFG1); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_CFG1, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc); + } + + /* nvm default */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_DEFAULT_CFG); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(DEFAULT_CFG, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG", + rc); + } + + /* nvm meta */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + REGDUMP_HEADER_SIZE, + &feature_size, QED_NVM_IMAGE_NVM_META); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(NVM_META, cur_engine, + feature_size, omit_engine); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc); + } + return 0; } int qed_dbg_all_data_size(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->dbg_params.engine_for_debug]; + u32 regs_len = 0, image_len = 0; u8 cur_engine, org_engine; - u32 regs_len = 0; org_engine = qed_get_debug_engine(cdev); for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { @@ 
-7993,6 +8154,15 @@ int qed_dbg_all_data_size(struct qed_dev *cdev) /* Engine common */ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev); + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; return regs_len; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d2ad5e92c74f..b285edc8d6a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1098,7 +1098,7 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Sending final cleanup for PFVF[%d] [Command %08x\n]", + "Sending final cleanup for PFVF[%d] [Command %08x]\n", id, command); qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); @@ -1149,18 +1149,10 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) return -EINVAL; } - switch (p_hwfn->cdev->mf_mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) hw_mode |= 1 << MODE_MF_SD; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else hw_mode |= 1 << MODE_MF_SI; - } hw_mode |= 1 << MODE_ASIC; @@ -1507,6 +1499,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, p_hwfn->hw_info.ovlan); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, + 1); } /* Enable classification by MAC if needed */ @@ -1557,7 +1554,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, /* send function start command */ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, - p_hwfn->cdev->mf_mode, allow_npar_tx_switch); if (rc) { DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); @@ -1644,6 +1640,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; + u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); @@ -1677,6 +1674,24 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) if (rc) return rc; + if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, + &cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits))) { + if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) + ether_type = ETH_P_8021Q; + else + ether_type = ETH_P_8021AD; + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, + ether_type); + } + qed_fill_load_req_params(&load_req_params, p_params->p_drv_load_params); rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, @@ -2639,31 +2654,57 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link->pause.autoneg, p_caps->default_eee, p_caps->eee_lpi_timer); - /* Read 
Multi-function information from shmem */ - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, glob) + - offsetof(struct nvm_cfg1_glob, generic_cont0); + if (IS_LEAD_HWFN(p_hwfn)) { + struct qed_dev *cdev = p_hwfn->cdev; - generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, generic_cont0); - mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> - NVM_CFG1_GLOB_MF_MODE_OFFSET; + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); - switch (mf_mode) { - case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = QED_MF_OVLAN; - break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = QED_MF_NPAR; - break; - case NVM_CFG1_GLOB_MF_MODE_DEFAULT: - p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; - break; + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); + break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_UFP_SPECIFIC) | + BIT(QED_MF_8021Q_TAGGING); + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_8021AD_TAGGING); + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST) | + BIT(QED_MF_INTER_PF_SWITCH); + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST); + if (QED_IS_BB(p_hwfn->cdev)) + cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); + break; + } + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + cdev->mf_bits); } - DP_INFO(p_hwfn, "Multi function mode is %08x\n", - p_hwfn->cdev->mf_mode); - /* Read Multi-function information from shmem */ + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + p_hwfn->cdev->mf_bits); + + /* Read device capabilities information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, device_capabilities); @@ -2751,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, { u32 port_mode; - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); + port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); if (port_mode < 3) { p_hwfn->cdev->num_ports_in_engine = 1; @@ -2856,6 +2897,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_mcp_cmd_port_init(p_hwfn, p_ptt); qed_get_eee_caps(p_hwfn, p_ptt); + + qed_mcp_read_ufp_config(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { @@ -3462,7 +3505,7 @@ int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return 0; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3507,7 +3550,7 @@ void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) return; qed_llh_mac_to_filter(&high, &low, p_filter); @@ -3549,7 +3592,7 @@ qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0, en; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if 
(!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return 0; switch (type) { @@ -3647,7 +3690,7 @@ qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn, u32 high = 0, low = 0; int i; - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) return; switch (type) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 2dc9b312a795..cc1b373c0ace 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -313,6 +313,9 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, p_data->d_id.addr_mid = p_conn->d_id.addr_mid; p_data->d_id.addr_lo = p_conn->d_id.addr_lo; p_data->flags = p_conn->flags; + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + SET_FIELD(p_data->flags, + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); p_data->def_q_idx = p_conn->def_q_idx; return qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 7f5ec42dde48..bee10c1781fb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1095,14 +1095,16 @@ enum personality_type { struct pf_start_tunnel_config { u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; - u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; }; /* Ramrod data for PF start ramrod */ @@ -1145,14 +1147,17 @@ struct pf_update_tunnel_config { u8 update_rx_def_non_ucast_clss; u8 set_vxlan_udp_port_flg; u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; u8 tunnel_clss_vxlan; u8 tunnel_clss_l2geneve; u8 tunnel_clss_ipgeneve; u8 tunnel_clss_l2gre; u8 tunnel_clss_ipgre; + u8 reserved; __le16 vxlan_udp_port; __le16 geneve_udp_port; - __le16 reserved; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; }; /* Data for port update ramrod */ @@ -2535,7 +2540,14 @@ struct idle_chk_data { u16 reserved2; }; -/* Debug Tools data (per HW function) */ +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ struct dbg_tools_data { struct dbg_grc_data grc; struct dbg_bus_data bus; @@ -2544,8 +2556,13 @@ struct dbg_tools_data { u8 block_in_reset[88]; u8 chip_id; u8 platform_id; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; u8 initialized; u8 use_dmae; + u8 reserved; + struct pretend_params pretend; u32 num_regs_read; }; @@ -2975,6 +2992,24 @@ void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); /** + * @brief qed_read_fw_info - Reads FW info from the chip. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param fw_info - Out: a pointer to write the FW info into. + * + * @return true if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. 
+ */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); + +/** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. * @@ -4110,6 +4145,21 @@ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); */ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); +#define NUM_STORMS 6 + +/** + * @brief qed_set_rdma_error_level - Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers + * @param assert_level - An array of assert levels for each storm. + * + */ +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + /* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -4340,27 +4390,67 @@ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE 
(IRO[50].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) -static const struct iro iro_arr[51] = { +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + +static const struct iro iro_arr[59] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4408,10 +4498,18 @@ static const struct iro iro_arr[51] = { {0x10768, 0x20, 0x0, 0x0, 0x20}, {0x2d48, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc748, 0x8, 0x0, 0x0, 0x1}, + {0xa128, 0x8, 0x0, 0x0, 0x1}, + {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xf030, 0x8, 0x0, 0x0, 0x1}, + {0x13028, 0x8, 0x0, 0x0, 0x1}, + {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, - {0xed90, 0x10, 0x0, 0x0, 0x10}, - {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0xed90, 0x28, 0x0, 0x0, 0x28}, + {0xa520, 0x18, 0x0, 0x0, 0x18}, + {0xa6a0, 0x8, 0x0, 0x0, 0x8}, {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; /* Runtime array offsets */ @@ -4797,147 +4895,147 @@ static const struct iro iro_arr[51] = { #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 #define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 -#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 - -#define RUNTIME_ARRAY_SIZE 43023 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907 +#define 
CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970 
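/* The per-VOQ runtime offsets renumbered in this hunk follow a regular layout: each VOQ occupies three consecutive runtime-array slots (YCMD_QS_NUM_LINES, BTB_GUARANTEED, BTB_SHARED_AREA_SETUP), starting from VOQ0 at 42913. A minimal sketch of that relationship; the two helper macros below are illustrative only and are not part of this patch. */
#define QED_PBF_VOQ_RT_STRIDE 3
#define QED_PBF_YCMD_QS_NUM_LINES_VOQ_RT(n) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (n) * QED_PBF_VOQ_RT_STRIDE)
/* e.g. n = 19 gives 42913 + 57 = 42970, matching the define above */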
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021 + +#define RUNTIME_ARRAY_SIZE 43022 + /* Init Callbacks */ #define DMAE_READY_CB 0 @@ -5694,8 +5792,10 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF -#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 }; /* Command for setting tpa parameters */ @@ -6756,7 +6856,7 @@ 
struct e4_ystorm_rdma_task_ag_ctx { #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -6812,7 +6912,7 @@ struct e4_mstorm_rdma_task_ag_ctx { #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 #define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 key; - __le32 mw_cnt; + __le32 mw_cnt_or_qp_id; u8 ref_cnt_seq; u8 ctx_upd_seq; __le16 dif_flags; @@ -7075,8 +7175,7 @@ struct rdma_register_tid_ramrod_data { struct regpair va; struct regpair pbl_base; struct regpair dif_error_addr; - struct regpair dif_runt_addr; - __le32 reserved4[2]; + __le32 reserved4[4]; }; /* rdma resize cq output params */ @@ -7144,8 +7243,7 @@ struct rdma_srq_modify_ramrod_data { enum rdma_tid_type { RDMA_TID_REGISTERED_MR, RDMA_TID_FMR, - RDMA_TID_MW_TYPE1, - RDMA_TID_MW_TYPE2A, + RDMA_TID_MW, MAX_RDMA_TID_TYPE }; @@ -7681,6 +7779,16 @@ struct e4_roce_conn_context { struct ustorm_roce_conn_st_ctx ustorm_st_context; }; +/* roce cqes statistics */ +struct roce_cqe_stats { + __le32 req_cqe_error; + __le32 req_remote_access_errors; + __le32 req_remote_invalid_request; + __le32 resp_cqe_error; + __le32 resp_local_length_error; + __le32 reserved; +}; + /* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; @@ -7798,8 +7906,8 @@ struct roce_dcqcn_sent_stats { /* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { - __le32 num_bound_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp requester ramrod data */ @@ -7809,8 +7917,8 @@ struct roce_destroy_qp_req_ramrod_data { /* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { - __le32 num_invalidated_mw; __le32 cq_prod; + __le32 reserved; }; /* RoCE destroy qp responder ramrod data */ @@ -7818,16 +7926,27 @@ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce error statistics */ +struct roce_error_stats { + __le32 resp_remote_access_errors; + __le32 reserved; +}; + /* roce special events statistics */ struct roce_events_stats { - __le16 silent_drops; - __le16 rnr_naks_sent; + __le32 silent_drops; + __le32 rnr_naks_sent; __le32 retransmit_count; __le32 icrc_error_count; - __le32 reserved; + __le32 implied_nak_seq_err; + __le32 duplicate_request; + __le32 local_ack_timeout_err; + __le32 out_of_sequence; + __le32 packet_seq_err; + __le32 rnr_nak_retry_err; }; -/* ROCE slow path EQ cmd IDs */ +/* roce slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7845,6 +7964,9 @@ struct roce_init_func_params { u8 cnp_dscp; u8 reserved; __le32 cnp_send_timeout; + __le16 rl_offset; + u8 rl_count_log; + u8 reserved1[5]; }; /* roce func init ramrod data */ @@ -8532,7 +8654,7 @@ struct e4_tstorm_roce_resp_conn_ag_ctx { __le16 rq_prod; __le16 conn_dpi; __le16 irq_cons; - __le32 num_invlidated_mw; + __le32 reg9; __le32 reg10; }; @@ -9725,6 +9847,8 @@ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, + IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, MAX_IWARP_EQE_ASYNC_OPCODE }; @@ -11863,6 +11987,8 @@ struct public_global { u32 running_bundle_id; s32 external_temperature; u32 mdump_reason; + u32 data_ptr; + u32 data_size; }; struct fw_flr_mb { @@ -11993,6 +12119,17 @@ struct public_port { #define EEE_REMOTE_TW_TX_OFFSET 0 
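/* The mask/offset pairs in this hunk, like the OEM_CFG_* fields added just below, describe bit-fields packed into single 32-bit shmem words. A minimal decode sketch under that assumption; "val" and "channel" are illustrative locals, not names from this patch. */
u32 val = qed_rd(p_hwfn, p_ptt,
		 p_hwfn->mcp_info->port_addr +
		 offsetof(struct public_port, oem_cfg_port));
u8 channel = (val & OEM_CFG_CHANNEL_TYPE_MASK) >>
	     OEM_CFG_CHANNEL_TYPE_OFFSET;
/* OEM_CFG_CHANNEL_TYPE_STAGGED here selects the S-tagged (UFP) mode that qed_mcp_read_ufp_config() consumes elsewhere in this series. */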
#define EEE_REMOTE_TW_RX_MASK 0xffff0000 #define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 reserved1; + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 }; struct public_func { @@ -12069,6 +12206,23 @@ struct public_func { #define DRV_ID_DRV_INIT_HW_MASK 0x80000000 #define DRV_ID_DRV_INIT_HW_SHIFT 31 #define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 }; struct mcp_mac { @@ -12295,6 +12449,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_CODE_RESOURCE_CMD 0x00230000 +#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 #define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F #define RESOURCE_CMD_REQ_RESC_SHIFT 0 @@ -12495,6 +12650,8 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_BW_UPDATE11, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_GET_TLV_REQ, MFW_DRV_MSG_MAX }; @@ -12530,6 +12687,233 @@ struct mcp_public_data { struct public_func func[MCP_GLOB_FUNC_MAX]; }; +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + 
DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + 
DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT +}; + struct nvm_cfg_mac_address { u32 mac_addr_hi; #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index fca2dbd93ad9..70504dcf4087 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) *(u32 *)&p_ptt->pxp.pretend); } +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + REG_WR(p_hwfn, + 
qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) { u32 concrete_fid = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 8db2839a8ec8..505e94db939d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -245,6 +245,18 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief qed_port_fid_pretend - pretend to another port and another function + * when accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + */ +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid); + +/** * @brief qed_vfid_to_concrete - build a concrete FID for a * given VF ID * diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1365da7c8900..d845badf9b90 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, ram_line_lo, ram_line_hi; + u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + /* Tunnel type */ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); @@ -1337,9 +1340,14 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; } qed_wr(p_hwfn, + p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, ram_line_lo); @@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + + default: + return 0; + } +} + +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]) +{ + u8 storm_id; + + for (storm_id = 0; 
storm_id < NUM_STORMS; storm_id++) { + u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); + + qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 2a2b1018ed1d..90a2b53096e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -271,6 +271,8 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, p_ramrod->sq_num_pages = qp->sq_num_pages; p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); @@ -1157,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt, struct qed_iwarp_info *iwarp_info; struct qed_iwarp_ep *ep; u8 mpa_data_size = 0; - u8 ts_hdr_size = 0; u32 cid; int rc; @@ -1216,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt, iparams->cm_info.private_data, iparams->cm_info.private_data_len); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - ep->mss = iparams->mss - ts_hdr_size; + ep->mss = iparams->mss; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); ep->event_cb = iparams->event_cb; @@ -2335,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) u8 local_mac_addr[ETH_ALEN]; struct qed_iwarp_ep *ep; int tcp_start_offset; - u8 ts_hdr_size = 0; u8 ll2_syn_handle; int payload_len; u32 hdr_size; @@ -2413,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); - if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) - ts_hdr_size = TIMESTAMP_HEADER_SIZE; - - hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) + - ts_hdr_size; + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ?
40 : 60); ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); @@ -3004,8 +2997,11 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; struct regpair *fw_handle = &data->rdma_data.async_handle; struct qed_iwarp_ep *ep = NULL; + u16 srq_offset; + u16 srq_id; u16 cid; ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, @@ -3067,6 +3063,24 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, qed_iwarp_cid_cleaned(p_hwfn, cid); break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_EMPTY, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_LIMIT, + &srq_id); + break; case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 8667799d0069..1f6ac848109d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & QED_ACCEPT_ANY_VNI)); + p_ramrod->rx_mode.state = cpu_to_le16(state); DP_VERBOSE(p_hwfn, QED_MSG_SP, "p_ramrod->rx_mode.state = 0x%x\n", state); @@ -1677,6 +1680,8 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, HILO_64_REGPAIR(tstats.mftag_filter_discard); p_stats->common.mac_filter_discards += HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.gft_filter_drop += + HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); } static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, @@ -1852,6 +1857,11 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_change_count)); } static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, @@ -1959,11 +1969,14 @@ void qed_reset_vport_stats(struct qed_dev *cdev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. 
*/ - if (!cdev->reset_stats) + if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); - else + } else { _qed_get_vport_stats(cdev, cdev->reset_stats); + cdev->reset_stats->common.link_change_count = 0; + } } static enum gft_profile_type @@ -1973,6 +1986,8 @@ qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) return GFT_PROFILE_TYPE_4_TUPLE; if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) return GFT_PROFILE_TYPE_IP_DST_ADDR; + if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) + return GFT_PROFILE_TYPE_IP_SRC_ADDR; return GFT_PROFILE_TYPE_L4_DST_PORT; } @@ -2013,16 +2028,6 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc = -EINVAL; - rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); - if (rc) - return rc; - - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); - if (rc) - return rc; - } - /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); @@ -2047,15 +2052,28 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); - if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { - p_ramrod->rx_qid_valid = 1; - p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + if (p_params->b_is_drop) { + p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); + } else { + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, + &abs_rx_q_id); + if (rc) + return rc; + + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); } p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - - p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); p_ramrod->filter_action = p_params->b_is_add ?
GFT_ADD_FILTER : GFT_DELETE_FILTER; @@ -2848,6 +2866,24 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) +{ + int i, ret; + + if (IS_PF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); + if (ret) + return ret; + } + + return 0; +} + #ifdef CONFIG_QED_SRIOV extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif @@ -2885,6 +2921,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .ntuple_filter_config = &qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, .get_coalesce = &qed_get_coalesce, + .req_bulletin_update_mac = &qed_req_bulletin_update_mac, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index c4030e949cce..806a8da257e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -183,6 +183,7 @@ struct qed_filter_accept_flags { #define QED_ACCEPT_MCAST_MATCHED 0x08 #define QED_ACCEPT_MCAST_UNMATCHED 0x10 #define QED_ACCEPT_BCAST 0x20 +#define QED_ACCEPT_ANY_VNI 0x40 }; struct qed_arfs_config_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 468c59d2e491..c97ebd681c47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -960,12 +960,16 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->inner_vlan_stripping_en = p_ll2_conn->input.rx_vlan_removal_en; + + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) + p_ramrod->report_outer_vlan = 1; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ?
1 : 0; - if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && - (conn_type != QED_LL2_TYPE_IWARP)) { + if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && + p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && + conn_type != QED_LL2_TYPE_IWARP) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { @@ -1534,11 +1538,12 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_add_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_add_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -1694,11 +1699,16 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); if (QED_IS_IWARP_PERSONALITY(p_hwfn) && - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) + p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); - else + } else { start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) + pkt->remove_stag = true; + } + SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); @@ -1709,6 +1719,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, + !!(pkt->remove_stag)); + start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); @@ -1933,11 +1946,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, + ETH_P_FCOE, 0, + QED_LLH_FILTER_ETHERTYPE); qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8906, 0, - QED_LLH_FILTER_ETHERTYPE); - qed_llh_remove_protocol_filter(p_hwfn, p_ptt, - 0x8914, 0, + ETH_P_FIP, 0, QED_LLH_FILTER_ETHERTYPE); } @@ -2399,7 +2413,8 @@ fail: return -EINVAL; } -static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) +static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags) { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; @@ -2444,6 +2459,9 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) pkt.first_frag = mapping; pkt.first_frag_len = skb->len; pkt.cookie = skb; + if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) + pkt.remove_stag = true; rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 
7870ae2a6f7e..b04d57ca5176 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -64,6 +64,7 @@ #define QED_ROCE_QPS (8192) #define QED_ROCE_DPIS (8) +#define QED_RDMA_SRQS QED_ROCE_QPS static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -264,7 +265,6 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); - dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->dev_type = cdev->type; ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); @@ -273,7 +273,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_eng = FW_ENGINEERING_VERSION; - dev_info->mf_mode = cdev->mf_mode; + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, + &cdev->mf_bits); dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) @@ -922,6 +923,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, if (IS_ENABLED(CONFIG_QED_RDMA)) { params->rdma_pf_params.num_qps = QED_ROCE_QPS; params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; /* divide by 3 the MRs to avoid MF ILT overflow */ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } @@ -946,6 +948,68 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +static void qed_slowpath_wq_stop(struct qed_dev *cdev) +{ + int i; + + if (IS_VF(cdev)) + return; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].slowpath_wq) + continue; + + flush_workqueue(cdev->hwfns[i].slowpath_wq); + destroy_workqueue(cdev->hwfns[i].slowpath_wq); + } +} + +static void qed_slowpath_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + slowpath_task.work); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + return; + } + + if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, + &hwfn->slowpath_task_flags)) + qed_mfw_process_tlv_req(hwfn, ptt); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_slowpath_wq_start(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + char name[NAME_SIZE]; + int i; + + if (IS_VF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + + snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + + hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); + if (!hwfn->slowpath_wq) { + DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + } + + return 0; +} + static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { @@ -961,6 +1025,9 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (qed_iov_wq_start(cdev)) goto err; + if (qed_slowpath_wq_start(cdev)) + goto err; + if (IS_PF(cdev)) { rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, &cdev->pdev->dev); @@ -1095,6 +1162,8 @@ err: qed_iov_wq_stop(cdev, false); + qed_slowpath_wq_stop(cdev); + return rc; } @@ -1103,6 +1172,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (!cdev) return -ENODEV; + qed_slowpath_wq_stop(cdev); + qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { @@ -1894,15 +1965,8 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum 
qed_nvm_images type, u8 *buf, u16 len) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_ptt *ptt = qed_ptt_acquire(hwfn); - int rc; - if (!ptt) - return -EAGAIN; - - rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); - qed_ptt_release(hwfn, ptt); - return rc; + return qed_mcp_get_nvm_image(hwfn, type, buf, len); } static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, @@ -2095,3 +2159,89 @@ void qed_get_protocol_stats(struct qed_dev *cdev, return; } } + +int qed_mfw_tlv_req(struct qed_hwfn *hwfn) +{ + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Scheduling slowpath task [Flag: %d]\n", + QED_SLOWPATH_MFW_TLV_REQ); + smp_mb__before_atomic(); + set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + + return 0; +} + +static void +qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) +{ + struct qed_common_cb_ops *op = cdev->protocol_ops.common; + struct qed_eth_stats_common *p_common; + struct qed_generic_tlvs gen_tlvs; + struct qed_eth_stats stats; + int i; + + memset(&gen_tlvs, 0, sizeof(gen_tlvs)); + op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); + + if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) + tlv->flags.ipv4_csum_offload = true; + if (gen_tlvs.feat_flags & QED_TLV_LSO) + tlv->flags.lso_supported = true; + tlv->flags.b_set = true; + + for (i = 0; i < QED_TLV_MAC_COUNT; i++) { + if (is_valid_ether_addr(gen_tlvs.mac[i])) { + ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); + tlv->mac_set[i] = true; + } + } + + qed_get_vport_stats(cdev, &stats); + p_common = &stats.common; + tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + p_common->rx_bcast_pkts; + tlv->rx_frames_set = true; + tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + + p_common->rx_bcast_bytes; + tlv->rx_bytes_set = true; + tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + + p_common->tx_bcast_pkts; + tlv->tx_frames_set = true; + tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + + p_common->tx_bcast_bytes; + tlv->tx_bytes_set = true; +} + +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_buf) +{ + struct qed_dev *cdev = hwfn->cdev; + struct qed_common_cb_ops *ops; + + ops = cdev->protocol_ops.common; + if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { + DP_NOTICE(hwfn, "Can't collect TLV management info\n"); + return -EINVAL; + } + + switch (type) { + case QED_MFW_TLV_GENERIC: + qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); + break; + case QED_MFW_TLV_ETH: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); + break; + case QED_MFW_TLV_FCOE: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); + break; + case QED_MFW_TLV_ISCSI: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); + break; + default: + break; + } + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ec0d425766a7..6f9927d1a501 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -40,6 +40,7 @@ #include <linux/string.h> #include <linux/etherdevice.h> #include "qed.h" +#include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -1486,6 +1487,81 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) &resp, &param); } +void 
qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + return; + + memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, oem_cfg_port)); + val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> + OEM_CFG_CHANNEL_TYPE_OFFSET; + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + + val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; + if (val == OEM_CFG_SCHED_TYPE_ETS) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; + } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; + } else { + p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + } + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> + OEM_CFG_FUNC_TC_OFFSET; + p_hwfn->ufp_info.tc = (u8)val; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; + } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; + } else { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; + DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + } + + DP_NOTICE(p_hwfn, + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", + p_hwfn->ufp_info.mode, + p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); +} + +static int +qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; + + qed_qm_reconf(p_hwfn, p_ptt); + } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { + /* Merge UFP TC with the dcbx TC data */ + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + } else { + DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); + return -EINVAL; + } + + /* update storm FW with negotiation results */ + qed_sp_pf_update_ufp(p_hwfn); + + /* update stag pcp value */ + qed_sp_pf_update_stag(p_hwfn); + + return 0; +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1529,6 +1605,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, qed_dcbx_mib_update_event(p_hwfn, p_ptt, QED_DCBX_OPERATIONAL_MIB); break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + qed_mcp_handle_ufp_event(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; @@ -1544,6 +1623,8 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_S_TAG_UPDATE: qed_mcp_update_stag(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_TLV_REQ: + qed_mfw_tlv_req(p_hwfn); break; default: DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); @@ -2529,9 +2610,8 @@ err0: return rc; } -static int +int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { @@ -2546,6 +2626,15 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; + case QED_NVM_IMAGE_NVM_CFG1: + 
type = NVM_TYPE_NVM_CFG1; + break; + case QED_NVM_IMAGE_DEFAULT_CFG: + type = NVM_TYPE_DEFAULT_CFG; + break; + case QED_NVM_IMAGE_NVM_META: + type = NVM_TYPE_META; + break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", image_id); @@ -2569,7 +2658,6 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, } int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len) { @@ -2578,7 +2666,7 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, memset(p_buffer, 0, buffer_len); - rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); if (rc) return rc; @@ -2590,9 +2678,6 @@ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, return -EINVAL; } - /* Each NVM image is suffixed by CRC; Upper-layer has no need for it */ - image_att.length -= 4; - if (image_att.length > buffer_len) { DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8a5c988d0c3c..632a838f1fe3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -213,6 +213,44 @@ enum qed_ov_wol { QED_OV_WOL_ENABLED }; +enum qed_mfw_tlv_type { + QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ + QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ + QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ + QED_MFW_TLV_ISCSI = 0x8, /* iSCSI protocol TLVs */ + QED_MFW_TLV_MAX = 0x16, +}; + +struct qed_mfw_tlv_generic { +#define QED_MFW_TLV_FLAGS_SIZE 2 + struct { + u8 ipv4_csum_offload; + u8 lso_supported; + bool b_set; + } flags; + +#define QED_MFW_TLV_MAC_COUNT 3 + /* First entry for primary MAC, 2 secondary MACs possible */ + u8 mac[QED_MFW_TLV_MAC_COUNT][6]; + bool mac_set[QED_MFW_TLV_MAC_COUNT]; + + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +union qed_mfw_tlv_data { + struct qed_mfw_tlv_generic generic; + struct qed_mfw_tlv_eth eth; + struct qed_mfw_tlv_fcoe fcoe; + struct qed_mfw_tlv_iscsi iscsi; +}; + /** * @brief - returns the link params of the hw function * * @@ -486,7 +524,20 @@ struct qed_nvm_image_att { * @brief Allows reading a whole nvram image * * @param p_hwfn - * @param p_ptt + * @param image_id - image to get attributes for + * @param p_image_att - image attributes structure into which to fill data + * + * @return int - 0 - operation was successful. + */ +int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att); + +/** + * @brief Allows reading a whole nvram image + * + * @param p_hwfn * @param image_id - image requested for reading * @param p_buffer - allocated buffer into which to fill data * @param buffer_len - length of the allocated buffer. @@ -494,7 +545,6 @@ struct qed_nvm_image_att { * @return 0 iff p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); @@ -549,6 +599,17 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct bist_nvm_image_att *p_image_att, u32 image_index); +/** + * @brief - Processes the TLV request from MFW i.e., get the required TLV info + * from the qed client and send it to the MFW. + * + * @param p_hwfn + * @param p_ptt + * + * @return 0 upon success. 
+ */ +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -609,6 +670,14 @@ struct qed_mcp_mb_params { u32 mcp_param; }; +struct qed_drv_tlv_hdr { + u8 tlv_type; + u8 tlv_length; /* In dwords - not including this header */ + u8 tlv_reserved; +#define QED_DRV_TLV_FLAGS_CHANGED 0x01 + u8 tlv_flags; +}; + /** * @brief Initialize the interface with the MCP * @@ -993,6 +1062,14 @@ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief Read ufp config from the shared memory. + * + * @param p_hwfn + * @param p_ptt + */ +void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** * @brief Populate the nvm info shadow in the given hardware function * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c new file mode 100644 index 000000000000..6c16158d8090 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -0,0 +1,1337 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bug.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/vmalloc.h> +#include "qed.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define TLV_TYPE(p) (p[0]) +#define TLV_LENGTH(p) (p[1]) +#define TLV_FLAGS(p) (p[3]) + +#define QED_TLV_DATA_MAX (14) +struct qed_tlv_parsed_buf { + /* To be filled with the address to set in Value field */ + void *p_val; + + /* To be used internally in case the value has to be modified */ + u8 data[QED_TLV_DATA_MAX]; +}; + +static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) +{ + switch (tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + case DRV_TLV_OS_DRIVER_STATES: + case DRV_TLV_PXE_BOOT_PROGRESS: + case DRV_TLV_RX_FRAMES_RECEIVED: + case DRV_TLV_RX_BYTES_RECEIVED: + case DRV_TLV_TX_FRAMES_SENT: + case DRV_TLV_TX_BYTES_SENT: + case DRV_TLV_NPIV_ENABLED: + case DRV_TLV_PCIE_BUS_RX_UTILIZATION: + case DRV_TLV_PCIE_BUS_TX_UTILIZATION: + case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION: + case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED: + case DRV_TLV_NCSI_RX_BYTES_RECEIVED: + case DRV_TLV_NCSI_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_GENERIC; + break; + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + case DRV_TLV_PROMISCUOUS_MODE: + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_IOV_OFFLOAD: + case DRV_TLV_TX_QUEUES_EMPTY: + case DRV_TLV_RX_QUEUES_EMPTY: + case DRV_TLV_TX_QUEUES_FULL: + case DRV_TLV_RX_QUEUES_FULL: + *tlv_group |= QED_MFW_TLV_ETH; + break; + case DRV_TLV_SCSI_TO: + case DRV_TLV_R_T_TOV: + case DRV_TLV_R_A_TOV: + case DRV_TLV_E_D_TOV: + case DRV_TLV_CR_TOV: + case DRV_TLV_BOOT_TYPE: + case DRV_TLV_NPIV_STATE: + case DRV_TLV_NUM_OF_NPIV_IDS: + case DRV_TLV_SWITCH_NAME: + case 
DRV_TLV_SWITCH_PORT_NUM: + case DRV_TLV_SWITCH_PORT_ID: + case DRV_TLV_VENDOR_NAME: + case DRV_TLV_SWITCH_MODEL: + case DRV_TLV_SWITCH_FW_VER: + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + case DRV_TLV_PORT_ALIAS: + case DRV_TLV_PORT_STATE: + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_LINK_FAILURE_COUNT: + case DRV_TLV_FCOE_BOOT_PROGRESS: + case DRV_TLV_RX_BROADCAST_PACKETS: + case DRV_TLV_TX_BROADCAST_PACKETS: + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + case DRV_TLV_FCOE_TX_FRAMES_SENT: + case DRV_TLV_FCOE_TX_BYTES_SENT: + case DRV_TLV_CRC_ERROR_COUNT: + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + case DRV_TLV_DISPARITY_ERROR_COUNT: + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_RJT: + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + case DRV_TLV_FDISCS_SENT_COUNT: + case DRV_TLV_FDISC_ACCS_RECEIVED: + case DRV_TLV_FDISC_RJTS_RECEIVED: + case DRV_TLV_PLOGI_SENT_COUNT: + case DRV_TLV_PLOGI_ACCS_RECEIVED: + case DRV_TLV_PLOGI_RJTS_RECEIVED: + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_TIMESTAMP: + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + case DRV_TLV_LOGOS_ISSUED: + case DRV_TLV_LOGO_ACCS_RECEIVED: + case DRV_TLV_LOGO_RJTS_RECEIVED: + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + case 
DRV_TLV_LOGO_5_TIMESTAMP: + case DRV_TLV_LOGOS_RECEIVED: + case DRV_TLV_ACCS_ISSUED: + case DRV_TLV_PRLIS_ISSUED: + case DRV_TLV_ACCS_RECEIVED: + case DRV_TLV_ABTS_SENT_COUNT: + case DRV_TLV_ABTS_ACCS_RECEIVED: + case DRV_TLV_ABTS_RJTS_RECEIVED: + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_TIMESTAMP: + case DRV_TLV_RSCNS_RECEIVED: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + case DRV_TLV_LUN_RESETS_ISSUED: + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + case DRV_TLV_TPRLOS_SENT: + case DRV_TLV_NOS_SENT_COUNT: + case DRV_TLV_NOS_RECEIVED_COUNT: + case DRV_TLV_OLS_COUNT: + case DRV_TLV_LR_COUNT: + case DRV_TLV_LRR_COUNT: + case DRV_TLV_LIP_SENT_COUNT: + case DRV_TLV_LIP_RECEIVED_COUNT: + case DRV_TLV_EOFA_COUNT: + case DRV_TLV_EOFNI_COUNT: + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + *tlv_group |= QED_MFW_TLV_FCOE; + break; + case DRV_TLV_TARGET_LLMNR_ENABLED: + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + case DRV_TLV_AUTHENTICATION_METHOD: + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + case DRV_TLV_MAX_FRAME_SIZE: + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_ISCSI_BOOT_PROGRESS: + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_ISCSI; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Returns size of the data buffer, or -1 in case TLV data is not available. */ +static int +qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_generic *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + if (p_drv_buf->flags.b_set) { + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ? + 1 : 0; + p_buf->data[0] |= (p_drv_buf->flags.lso_supported ? 
+ 1 : 0) << 1; + p_buf->p_val = p_buf->data; + return QED_MFW_TLV_FLAGS_SIZE; + } + break; + + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + { + int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR; + + if (p_drv_buf->mac_set[idx]) { + p_buf->p_val = p_drv_buf->mac[idx]; + return ETH_ALEN; + } + break; + } + + case DRV_TLV_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_eth *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + if (p_drv_buf->lso_maxoff_size_set) { + p_buf->p_val = &p_drv_buf->lso_maxoff_size; + return sizeof(p_drv_buf->lso_maxoff_size); + } + break; + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + if (p_drv_buf->lso_minseg_size_set) { + p_buf->p_val = &p_drv_buf->lso_minseg_size; + return sizeof(p_drv_buf->lso_minseg_size); + } + break; + case DRV_TLV_PROMISCUOUS_MODE: + if (p_drv_buf->prom_mode_set) { + p_buf->p_val = &p_drv_buf->prom_mode; + return sizeof(p_drv_buf->prom_mode); + } + break; + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->tx_descr_size; + return sizeof(p_drv_buf->tx_descr_size); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->rx_descr_size; + return sizeof(p_drv_buf->rx_descr_size); + } + break; + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + if (p_drv_buf->netq_count_set) { + p_buf->p_val = &p_drv_buf->netq_count; + return sizeof(p_drv_buf->netq_count); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + if (p_drv_buf->tcp4_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp4_offloads; + return sizeof(p_drv_buf->tcp4_offloads); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + if (p_drv_buf->tcp6_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp6_offloads; + return sizeof(p_drv_buf->tcp6_offloads); + } + break; + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_descr_qdepth; + return sizeof(p_drv_buf->tx_descr_qdepth); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_descr_qdepth; + return sizeof(p_drv_buf->rx_descr_qdepth); + } + break; + case DRV_TLV_IOV_OFFLOAD: + if (p_drv_buf->iov_offload_set) { + p_buf->p_val = &p_drv_buf->iov_offload; + return sizeof(p_drv_buf->iov_offload); + } + break; + case DRV_TLV_TX_QUEUES_EMPTY: + if (p_drv_buf->txqs_empty_set) { + p_buf->p_val = &p_drv_buf->txqs_empty; + return sizeof(p_drv_buf->txqs_empty); + } + break; + case DRV_TLV_RX_QUEUES_EMPTY: + if (p_drv_buf->rxqs_empty_set) { + p_buf->p_val = &p_drv_buf->rxqs_empty; + return sizeof(p_drv_buf->rxqs_empty); + } + break; + case DRV_TLV_TX_QUEUES_FULL: + if 
(p_drv_buf->num_txqs_full_set) { + p_buf->p_val = &p_drv_buf->num_txqs_full; + return sizeof(p_drv_buf->num_txqs_full); + } + break; + case DRV_TLV_RX_QUEUES_FULL: + if (p_drv_buf->num_rxqs_full_set) { + p_buf->p_val = &p_drv_buf->num_rxqs_full; + return sizeof(p_drv_buf->num_rxqs_full); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, + struct qed_tlv_parsed_buf *p_buf) +{ + if (!p_time->b_set) + return -1; + + /* Validate numbers */ + if (p_time->month > 12) + p_time->month = 0; + if (p_time->day > 31) + p_time->day = 0; + if (p_time->hour > 23) + p_time->hour = 0; + if (p_time->min > 59) + p_time->min = 0; + if (p_time->msec > 999) + p_time->msec = 0; + if (p_time->usec > 999) + p_time->usec = 0; + + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + snprintf(p_buf->data, 14, "%d%d%d%d%d%d", + p_time->month, p_time->day, + p_time->hour, p_time->min, p_time->msec, p_time->usec); + + p_buf->p_val = p_buf->data; + + return QED_MFW_TLV_TIME_SIZE; +} + +static int +qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_fcoe *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + struct qed_mfw_tlv_time *p_time; + u8 idx; + + switch (p_tlv->tlv_type) { + case DRV_TLV_SCSI_TO: + if (p_drv_buf->scsi_timeout_set) { + p_buf->p_val = &p_drv_buf->scsi_timeout; + return sizeof(p_drv_buf->scsi_timeout); + } + break; + case DRV_TLV_R_T_TOV: + if (p_drv_buf->rt_tov_set) { + p_buf->p_val = &p_drv_buf->rt_tov; + return sizeof(p_drv_buf->rt_tov); + } + break; + case DRV_TLV_R_A_TOV: + if (p_drv_buf->ra_tov_set) { + p_buf->p_val = &p_drv_buf->ra_tov; + return sizeof(p_drv_buf->ra_tov); + } + break; + case DRV_TLV_E_D_TOV: + if (p_drv_buf->ed_tov_set) { + p_buf->p_val = &p_drv_buf->ed_tov; + return sizeof(p_drv_buf->ed_tov); + } + break; + case DRV_TLV_CR_TOV: + if (p_drv_buf->cr_tov_set) { + p_buf->p_val = &p_drv_buf->cr_tov; + return sizeof(p_drv_buf->cr_tov); + } + break; + case DRV_TLV_BOOT_TYPE: + if (p_drv_buf->boot_type_set) { + p_buf->p_val = &p_drv_buf->boot_type; + return sizeof(p_drv_buf->boot_type); + } + break; + case DRV_TLV_NPIV_STATE: + if (p_drv_buf->npiv_state_set) { + p_buf->p_val = &p_drv_buf->npiv_state; + return sizeof(p_drv_buf->npiv_state); + } + break; + case DRV_TLV_NUM_OF_NPIV_IDS: + if (p_drv_buf->num_npiv_ids_set) { + p_buf->p_val = &p_drv_buf->num_npiv_ids; + return sizeof(p_drv_buf->num_npiv_ids); + } + break; + case DRV_TLV_SWITCH_NAME: + if (p_drv_buf->switch_name_set) { + p_buf->p_val = &p_drv_buf->switch_name; + return sizeof(p_drv_buf->switch_name); + } + break; + case DRV_TLV_SWITCH_PORT_NUM: + if (p_drv_buf->switch_portnum_set) { + p_buf->p_val = &p_drv_buf->switch_portnum; + return sizeof(p_drv_buf->switch_portnum); + } + break; + case DRV_TLV_SWITCH_PORT_ID: + if (p_drv_buf->switch_portid_set) { + p_buf->p_val = &p_drv_buf->switch_portid; + return sizeof(p_drv_buf->switch_portid); + } + break; + case DRV_TLV_VENDOR_NAME: + if (p_drv_buf->vendor_name_set) { + p_buf->p_val = &p_drv_buf->vendor_name; + return sizeof(p_drv_buf->vendor_name); + } + break; + case DRV_TLV_SWITCH_MODEL: + if (p_drv_buf->switch_model_set) { + p_buf->p_val = &p_drv_buf->switch_model; + return sizeof(p_drv_buf->switch_model); + } + break; + case DRV_TLV_SWITCH_FW_VER: + if (p_drv_buf->switch_fw_version_set) { + p_buf->p_val = &p_drv_buf->switch_fw_version; + return sizeof(p_drv_buf->switch_fw_version); + } + break; + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + if (p_drv_buf->qos_pri_set) { + 
p_buf->p_val = &p_drv_buf->qos_pri; + return sizeof(p_drv_buf->qos_pri); + } + break; + case DRV_TLV_PORT_ALIAS: + if (p_drv_buf->port_alias_set) { + p_buf->p_val = &p_drv_buf->port_alias; + return sizeof(p_drv_buf->port_alias); + } + break; + case DRV_TLV_PORT_STATE: + if (p_drv_buf->port_state_set) { + p_buf->p_val = &p_drv_buf->port_state; + return sizeof(p_drv_buf->port_state); + } + break; + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_tx_descr_size; + return sizeof(p_drv_buf->fip_tx_descr_size); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_rx_descr_size; + return sizeof(p_drv_buf->fip_rx_descr_size); + } + break; + case DRV_TLV_LINK_FAILURE_COUNT: + if (p_drv_buf->link_failures_set) { + p_buf->p_val = &p_drv_buf->link_failures; + return sizeof(p_drv_buf->link_failures); + } + break; + case DRV_TLV_FCOE_BOOT_PROGRESS: + if (p_drv_buf->fcoe_boot_progress_set) { + p_buf->p_val = &p_drv_buf->fcoe_boot_progress; + return sizeof(p_drv_buf->fcoe_boot_progress); + } + break; + case DRV_TLV_RX_BROADCAST_PACKETS: + if (p_drv_buf->rx_bcast_set) { + p_buf->p_val = &p_drv_buf->rx_bcast; + return sizeof(p_drv_buf->rx_bcast); + } + break; + case DRV_TLV_TX_BROADCAST_PACKETS: + if (p_drv_buf->tx_bcast_set) { + p_buf->p_val = &p_drv_buf->tx_bcast; + return sizeof(p_drv_buf->tx_bcast); + } + break; + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_txq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_txq_depth; + return sizeof(p_drv_buf->fcoe_txq_depth); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_rxq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_rxq_depth; + return sizeof(p_drv_buf->fcoe_rxq_depth); + } + break; + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + if (p_drv_buf->fcoe_rx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_frames; + return sizeof(p_drv_buf->fcoe_rx_frames); + } + break; + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + if (p_drv_buf->fcoe_rx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_bytes; + return sizeof(p_drv_buf->fcoe_rx_bytes); + } + break; + case DRV_TLV_FCOE_TX_FRAMES_SENT: + if (p_drv_buf->fcoe_tx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_frames; + return sizeof(p_drv_buf->fcoe_tx_frames); + } + break; + case DRV_TLV_FCOE_TX_BYTES_SENT: + if (p_drv_buf->fcoe_tx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_bytes; + return sizeof(p_drv_buf->fcoe_tx_bytes); + } + break; + case DRV_TLV_CRC_ERROR_COUNT: + if (p_drv_buf->crc_count_set) { + p_buf->p_val = &p_drv_buf->crc_count; + return sizeof(p_drv_buf->crc_count); + } + break; + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->crc_err_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx]; + return sizeof(p_drv_buf->crc_err_src_fcid[idx]); + } + break; + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx], + p_buf); + case 
DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + if (p_drv_buf->losync_err_set) { + p_buf->p_val = &p_drv_buf->losync_err; + return sizeof(p_drv_buf->losync_err); + } + break; + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + if (p_drv_buf->losig_err_set) { + p_buf->p_val = &p_drv_buf->losig_err; + return sizeof(p_drv_buf->losig_err); + } + break; + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + if (p_drv_buf->primtive_err_set) { + p_buf->p_val = &p_drv_buf->primtive_err; + return sizeof(p_drv_buf->primtive_err); + } + break; + case DRV_TLV_DISPARITY_ERROR_COUNT: + if (p_drv_buf->disparity_err_set) { + p_buf->p_val = &p_drv_buf->disparity_err; + return sizeof(p_drv_buf->disparity_err); + } + break; + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + if (p_drv_buf->code_violation_err_set) { + p_buf->p_val = &p_drv_buf->code_violation_err; + return sizeof(p_drv_buf->code_violation_err); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1; + if (p_drv_buf->flogi_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_param[idx]; + return sizeof(p_drv_buf->flogi_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1; + + if (p_drv_buf->flogi_acc_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_acc_param[idx]; + return sizeof(p_drv_buf->flogi_acc_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_RJT: + if (p_drv_buf->flogi_rjt_set) { + p_buf->p_val = &p_drv_buf->flogi_rjt; + return sizeof(p_drv_buf->flogi_rjt); + } + break; + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp, + p_buf); + case DRV_TLV_FDISCS_SENT_COUNT: + if (p_drv_buf->fdiscs_set) { + p_buf->p_val = &p_drv_buf->fdiscs; + return sizeof(p_drv_buf->fdiscs); + } + break; + case DRV_TLV_FDISC_ACCS_RECEIVED: + if (p_drv_buf->fdisc_acc_set) { + p_buf->p_val = &p_drv_buf->fdisc_acc; + return sizeof(p_drv_buf->fdisc_acc); + } + break; + case DRV_TLV_FDISC_RJTS_RECEIVED: + if (p_drv_buf->fdisc_rjt_set) { + p_buf->p_val = &p_drv_buf->fdisc_rjt; + return sizeof(p_drv_buf->fdisc_rjt); + } + break; + case DRV_TLV_PLOGI_SENT_COUNT: + if (p_drv_buf->plogi_set) { + p_buf->p_val = &p_drv_buf->plogi; + return sizeof(p_drv_buf->plogi); + } + break; + case DRV_TLV_PLOGI_ACCS_RECEIVED: + if (p_drv_buf->plogi_acc_set) { + p_buf->p_val = &p_drv_buf->plogi_acc; + return sizeof(p_drv_buf->plogi_acc); + } + break; + case DRV_TLV_PLOGI_RJTS_RECEIVED: + if (p_drv_buf->plogi_rjt_set) { + p_buf->p_val = &p_drv_buf->plogi_rjt; + return sizeof(p_drv_buf->plogi_rjt); + } + break; + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + 
DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->plogi_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx]; + return sizeof(p_drv_buf->plogi_dst_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx], + p_buf); + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->plogi_acc_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2; + p_time = &p_drv_buf->plogi_acc_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + case DRV_TLV_LOGOS_ISSUED: + if (p_drv_buf->tx_plogos_set) { + p_buf->p_val = &p_drv_buf->tx_plogos; + return sizeof(p_drv_buf->tx_plogos); + } + break; + case DRV_TLV_LOGO_ACCS_RECEIVED: + if (p_drv_buf->plogo_acc_set) { + p_buf->p_val = &p_drv_buf->plogo_acc; + return sizeof(p_drv_buf->plogo_acc); + } + break; + case DRV_TLV_LOGO_RJTS_RECEIVED: + if (p_drv_buf->plogo_rjt_set) { + p_buf->p_val = &p_drv_buf->plogo_rjt; + return sizeof(p_drv_buf->plogo_rjt); + } + break; + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) / + 2; + + if (p_drv_buf->plogo_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx]; + return sizeof(p_drv_buf->plogo_src_fcid[idx]); + } + break; + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx], + p_buf); + case DRV_TLV_LOGOS_RECEIVED: + if (p_drv_buf->rx_logos_set) { + p_buf->p_val = &p_drv_buf->rx_logos; + return sizeof(p_drv_buf->rx_logos); + } + break; + case DRV_TLV_ACCS_ISSUED: + if (p_drv_buf->tx_accs_set) { + p_buf->p_val = &p_drv_buf->tx_accs; + return sizeof(p_drv_buf->tx_accs); + } + break; + case DRV_TLV_PRLIS_ISSUED: + if (p_drv_buf->tx_prlis_set) { + p_buf->p_val = &p_drv_buf->tx_prlis; + return sizeof(p_drv_buf->tx_prlis); + } + break; + case DRV_TLV_ACCS_RECEIVED: + if (p_drv_buf->rx_accs_set) { + p_buf->p_val = &p_drv_buf->rx_accs; + return sizeof(p_drv_buf->rx_accs); + } + break; + case DRV_TLV_ABTS_SENT_COUNT: + if (p_drv_buf->tx_abts_set) { + p_buf->p_val = &p_drv_buf->tx_abts; + return sizeof(p_drv_buf->tx_abts); + } + break; + case DRV_TLV_ABTS_ACCS_RECEIVED: + if (p_drv_buf->rx_abts_acc_set) { + p_buf->p_val = &p_drv_buf->rx_abts_acc; + return sizeof(p_drv_buf->rx_abts_acc); + } + break; + case DRV_TLV_ABTS_RJTS_RECEIVED: + 
if (p_drv_buf->rx_abts_rjt_set) { + p_buf->p_val = &p_drv_buf->rx_abts_rjt; + return sizeof(p_drv_buf->rx_abts_rjt); + } + break; + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->abts_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx]; + return sizeof(p_drv_buf->abts_dst_fcid[idx]); + } + break; + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx], + p_buf); + case DRV_TLV_RSCNS_RECEIVED: + if (p_drv_buf->rx_rscn_set) { + p_buf->p_val = &p_drv_buf->rx_rscn; + return sizeof(p_drv_buf->rx_rscn); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1; + + if (p_drv_buf->rx_rscn_nport_set[idx]) { + p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx]; + return sizeof(p_drv_buf->rx_rscn_nport[idx]); + } + break; + case DRV_TLV_LUN_RESETS_ISSUED: + if (p_drv_buf->tx_lun_rst_set) { + p_buf->p_val = &p_drv_buf->tx_lun_rst; + return sizeof(p_drv_buf->tx_lun_rst); + } + break; + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + if (p_drv_buf->abort_task_sets_set) { + p_buf->p_val = &p_drv_buf->abort_task_sets; + return sizeof(p_drv_buf->abort_task_sets); + } + break; + case DRV_TLV_TPRLOS_SENT: + if (p_drv_buf->tx_tprlos_set) { + p_buf->p_val = &p_drv_buf->tx_tprlos; + return sizeof(p_drv_buf->tx_tprlos); + } + break; + case DRV_TLV_NOS_SENT_COUNT: + if (p_drv_buf->tx_nos_set) { + p_buf->p_val = &p_drv_buf->tx_nos; + return sizeof(p_drv_buf->tx_nos); + } + break; + case DRV_TLV_NOS_RECEIVED_COUNT: + if (p_drv_buf->rx_nos_set) { + p_buf->p_val = &p_drv_buf->rx_nos; + return sizeof(p_drv_buf->rx_nos); + } + break; + case DRV_TLV_OLS_COUNT: + if (p_drv_buf->ols_set) { + p_buf->p_val = &p_drv_buf->ols; + return sizeof(p_drv_buf->ols); + } + break; + case DRV_TLV_LR_COUNT: + if (p_drv_buf->lr_set) { + p_buf->p_val = &p_drv_buf->lr; + return sizeof(p_drv_buf->lr); + } + break; + case DRV_TLV_LRR_COUNT: + if (p_drv_buf->lrr_set) { + p_buf->p_val = &p_drv_buf->lrr; + return sizeof(p_drv_buf->lrr); + } + break; + case DRV_TLV_LIP_SENT_COUNT: + if (p_drv_buf->tx_lip_set) { + p_buf->p_val = &p_drv_buf->tx_lip; + return sizeof(p_drv_buf->tx_lip); + } + break; + case DRV_TLV_LIP_RECEIVED_COUNT: + if (p_drv_buf->rx_lip_set) { + p_buf->p_val = &p_drv_buf->rx_lip; + return sizeof(p_drv_buf->rx_lip); + } + break; + case DRV_TLV_EOFA_COUNT: + if (p_drv_buf->eofa_set) { + p_buf->p_val = &p_drv_buf->eofa; + return sizeof(p_drv_buf->eofa); + } + break; + case DRV_TLV_EOFNI_COUNT: + if (p_drv_buf->eofni_set) { + p_buf->p_val = &p_drv_buf->eofni; + return sizeof(p_drv_buf->eofni); + } + break; + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + if (p_drv_buf->scsi_chks_set) { + p_buf->p_val = &p_drv_buf->scsi_chks; + return sizeof(p_drv_buf->scsi_chks); + } + break; + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_cond_met; + return 
sizeof(p_drv_buf->scsi_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + if (p_drv_buf->scsi_busy_set) { + p_buf->p_val = &p_drv_buf->scsi_busy; + return sizeof(p_drv_buf->scsi_busy); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + if (p_drv_buf->scsi_inter_set) { + p_buf->p_val = &p_drv_buf->scsi_inter; + return sizeof(p_drv_buf->scsi_inter); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_inter_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_inter_cond_met; + return sizeof(p_drv_buf->scsi_inter_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + if (p_drv_buf->scsi_rsv_conflicts_set) { + p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts; + return sizeof(p_drv_buf->scsi_rsv_conflicts); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + if (p_drv_buf->scsi_tsk_full_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_full; + return sizeof(p_drv_buf->scsi_tsk_full); + } + break; + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + if (p_drv_buf->scsi_aca_active_set) { + p_buf->p_val = &p_drv_buf->scsi_aca_active; + return sizeof(p_drv_buf->scsi_aca_active); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + if (p_drv_buf->scsi_tsk_abort_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_abort; + return sizeof(p_drv_buf->scsi_tsk_abort); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + idx = (p_tlv->tlv_type - + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2; + + if (p_drv_buf->scsi_rx_chk_set[idx]) { + p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx]; + return sizeof(p_drv_buf->scsi_rx_chk[idx]); + } + break; + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2; + p_time = &p_drv_buf->scsi_chk_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_iscsi *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_TARGET_LLMNR_ENABLED: + if (p_drv_buf->target_llmnr_set) { + p_buf->p_val = &p_drv_buf->target_llmnr; + return sizeof(p_drv_buf->target_llmnr); + } + break; + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + if (p_drv_buf->header_digest_set) { + p_buf->p_val = &p_drv_buf->header_digest; + return sizeof(p_drv_buf->header_digest); + } + break; + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + if (p_drv_buf->data_digest_set) { + p_buf->p_val = &p_drv_buf->data_digest; + return sizeof(p_drv_buf->data_digest); + } + break; + case DRV_TLV_AUTHENTICATION_METHOD: + if (p_drv_buf->auth_method_set) { + p_buf->p_val = &p_drv_buf->auth_method; + return sizeof(p_drv_buf->auth_method); + } + break; + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + if (p_drv_buf->boot_taget_portal_set) { + p_buf->p_val = &p_drv_buf->boot_taget_portal; + return sizeof(p_drv_buf->boot_taget_portal); + } + break; + case DRV_TLV_MAX_FRAME_SIZE: + if (p_drv_buf->frame_size_set) { + p_buf->p_val = &p_drv_buf->frame_size; + return sizeof(p_drv_buf->frame_size); + } + break; + case 
DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_desc_size_set) { + p_buf->p_val = &p_drv_buf->tx_desc_size; + return sizeof(p_drv_buf->tx_desc_size); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_desc_size_set) { + p_buf->p_val = &p_drv_buf->rx_desc_size; + return sizeof(p_drv_buf->rx_desc_size); + } + break; + case DRV_TLV_ISCSI_BOOT_PROGRESS: + if (p_drv_buf->boot_progress_set) { + p_buf->p_val = &p_drv_buf->boot_progress; + return sizeof(p_drv_buf->boot_progress); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_desc_qdepth; + return sizeof(p_drv_buf->tx_desc_qdepth); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_desc_qdepth; + return sizeof(p_drv_buf->rx_desc_qdepth); + } + break; + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, + u8 tlv_group, u8 *p_mfw_buf, u32 size) +{ + union qed_mfw_tlv_data *p_tlv_data; + struct qed_tlv_parsed_buf buffer; + struct qed_drv_tlv_hdr tlv; + int len = 0; + u32 offset; + u8 *p_tlv; + + p_tlv_data = vzalloc(sizeof(*p_tlv_data)); + if (!p_tlv_data) + return -ENOMEM; + + if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) { + vfree(p_tlv_data); + return -EINVAL; + } + + memset(&tlv, 0, sizeof(tlv)); + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_tlv = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_tlv); + tlv.tlv_length = TLV_LENGTH(p_tlv); + tlv.tlv_flags = TLV_FLAGS(p_tlv); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Type %d length = %d flags = 0x%x\n", tlv.tlv_type, + tlv.tlv_length, tlv.tlv_flags); + + if (tlv_group == QED_MFW_TLV_GENERIC) + len = qed_mfw_get_gen_tlv_value(&tlv, + &p_tlv_data->generic, + &buffer); + else if (tlv_group == QED_MFW_TLV_ETH) + len = qed_mfw_get_eth_tlv_value(&tlv, + &p_tlv_data->eth, + &buffer); + else if (tlv_group == QED_MFW_TLV_FCOE) + len = qed_mfw_get_fcoe_tlv_value(&tlv, + &p_tlv_data->fcoe, + &buffer); + else + len = qed_mfw_get_iscsi_tlv_value(&tlv, + &p_tlv_data->iscsi, + &buffer); + + if (len > 0) { + WARN(len > 4 * tlv.tlv_length, + "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n", + len, 4 * tlv.tlv_length); + len = min_t(int, len, 4 * tlv.tlv_length); + tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED; + TLV_FLAGS(p_tlv) = tlv.tlv_flags; + memcpy(p_mfw_buf + offset + sizeof(tlv), + buffer.p_val, len); + } + } + + vfree(p_tlv_data); + + return 0; +} + +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr, size, offset, resp, param, val, global_offsize, global_addr; + u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp; + struct qed_drv_tlv_hdr tlv; + int rc; + + addr = 
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, data_ptr); + addr = qed_rd(p_hwfn, p_ptt, addr); + size = qed_rd(p_hwfn, p_ptt, global_addr + + offsetof(struct public_global, data_size)); + + if (!size) { + DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size); + goto drv_done; + } + + p_mfw_buf = vzalloc(size); + if (!p_mfw_buf) { + DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n"); + goto drv_done; + } + + /* Read the TLV request to a local buffer. The MFW represents the TLV in + * little-endian format and the mcp returns it in big-endian format. Hence + * the driver needs to convert the data to little endian first and then + * memcpy (cast) it to preserve the MFW TLV format in the driver buffer. + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = qed_rd(p_hwfn, p_ptt, addr + offset); + val = be32_to_cpu(val); + memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); + } + + /* Parse the headers to enumerate the requested TLV groups */ + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Unrecognized TLV %d\n", tlv.tlv_type); + } + + /* Sanitize the TLV groups according to personality */ + if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping L2 TLVs for non-L2 function\n"); + tlv_group &= ~QED_MFW_TLV_ETH; + } + + if ((tlv_group & QED_MFW_TLV_FCOE) && + p_hwfn->hw_info.personality != QED_PCI_FCOE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping FCoE TLVs for non-FCoE function\n"); + tlv_group &= ~QED_MFW_TLV_FCOE; + } + + if ((tlv_group & QED_MFW_TLV_ISCSI) && + p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping iSCSI TLVs for non-iSCSI function\n"); + tlv_group &= ~QED_MFW_TLV_ISCSI; + } + + /* Update the TLV values in the local buffer */ + for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { + if (tlv_group & id) + if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) + goto drv_done; + } + + /* Write the TLV data to shared memory. Each stream of 4 bytes first needs + * to be mem-copied to a u32 element to restore the LSB format, and then + * converted to big endian as required by the mcp write. 
+ */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); + val = cpu_to_be32(val); + qed_wr(p_hwfn, p_ptt, addr + offset, val); + } + +drv_done: + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, + &param); + + vfree(p_mfw_buf); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index a411f9c702a1..101d677114f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -259,15 +259,29 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, goto free_cid_map; } + /* Allocate bitmap for srqs */ + p_rdma_info->num_srqs = qed_cxt_get_srq_count(p_hwfn); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, + p_rdma_info->num_srqs, "SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) rc = qed_iwarp_alloc(p_hwfn); if (rc) - goto free_cid_map; + goto free_srq_map; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; +free_srq_map: + kfree(p_rdma_info->srq_map.bitmap); +free_real_cid_map: + kfree(p_rdma_info->real_cid_map.bitmap); free_cid_map: kfree(p_rdma_info->cid_map.bitmap); free_tid_map: @@ -351,6 +365,8 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); @@ -431,6 +447,12 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, if (cdev->rdma_max_sge) dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; + if (p_hwfn->cdev->rdma_max_srq_sge) { + dev->max_srq_sge = min_t(u32, + p_hwfn->cdev->rdma_max_srq_sge, + dev->max_srq_sge); + } dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; dev->max_inline = (cdev->rdma_max_inline) ?
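Aside: the pair of word loops in qed_mfw_process_tlv_req() above (read with be32_to_cpu(), write back with cpu_to_be32()) is the whole endianness contract with the MCP. A minimal user-space sketch of the same round-trip, with ntohl()/htonl() standing in for the kernel helpers and a plain uint32_t array standing in for the shared-memory window (all names here are illustrative, not the driver's):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* "Shared memory" holds big-endian 32-bit words; the local buffer holds
 * the equivalent CPU-order byte stream. size is assumed to be a multiple
 * of 4, as the TLV request buffer is. */
static void tlv_shmem_read(const uint32_t *shmem, uint8_t *buf, size_t size)
{
	size_t off;

	for (off = 0; off < size; off += sizeof(uint32_t)) {
		uint32_t val = ntohl(shmem[off / 4]);	/* be32_to_cpu() */

		memcpy(buf + off, &val, sizeof(val));
	}
}

static void tlv_shmem_write(const uint8_t *buf, uint32_t *shmem, size_t size)
{
	size_t off;

	for (off = 0; off < size; off += sizeof(uint32_t)) {
		uint32_t val;

		memcpy(&val, buf + off, sizeof(val));
		shmem[off / 4] = htonl(val);		/* cpu_to_be32() */
	}
}

Keeping the local copy as a raw byte stream is presumably what lets the TLV_TYPE()/TLV_LENGTH() accessors parse headers at arbitrary offsets without any further conversion.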
@@ -474,6 +496,8 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; dev->max_pkey = QED_RDMA_MAX_P_KEY; + dev->max_srq = p_hwfn->p_rdma_info->num_srqs; + dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / @@ -1484,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt, case QED_RDMA_TID_FMR: tid_type = RDMA_TID_FMR; break; - case QED_RDMA_TID_MW_TYPE1: - tid_type = RDMA_TID_MW_TYPE1; - break; - case QED_RDMA_TID_MW_TYPE2A: - tid_type = RDMA_TID_MW_TYPE2A; + case QED_RDMA_TID_MW: + tid_type = RDMA_TID_MW; break; default: rc = -EINVAL; @@ -1520,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt, RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); DMA_REGPAIR_LE(p_ramrod->dif_error_addr, params->dif_error_addr); - DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr); } rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); @@ -1628,6 +1648,155 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) return QED_LEADING_HWFN(cdev); } +static int qed_rdma_modify_srq(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *in_params) +{ + struct rdma_srq_modify_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + u16 opaque_fid; + int rc; + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_MODIFY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_modify_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + opaque_fid = p_hwfn->hw_info.opaque_fid; + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_destroy_srq(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *in_params) +{ + struct rdma_srq_destroy_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + struct qed_bmap *bmap; + u16 opaque_fid; + int rc; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_destroy_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "SRQ destroyed Id = %x", + in_params->srq_id); + + return rc; +} + +static int +qed_rdma_create_srq(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *in_params, + struct qed_rdma_create_srq_out_params *out_params) +{ + struct rdma_srq_create_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + enum qed_cxt_elem_type elem_type; 
+ struct qed_spq_entry *p_ent; + u16 opaque_fid, srq_id; + struct qed_bmap *bmap; + u32 returned_id; + int rc; + + bmap = &p_hwfn->p_rdma_info->srq_map; + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "failed to allocate srq id\n"); + return rc; + } + + elem_type = QED_ELEM_SRQ; + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); + if (rc) + goto err; + /* returned id is no greater than u16 */ + srq_id = (u16)returned_id; + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_srq; + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); + p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); + p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->page_size = cpu_to_le16(in_params->page_size); + DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->srq_id = srq_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "SRQ created Id = %x\n", out_params->srq_id); + + return rc; + +err: + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + return rc; +} + bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) { bool result; @@ -1773,6 +1942,9 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, + .rdma_create_srq = &qed_rdma_create_srq, + .rdma_modify_srq = &qed_rdma_modify_srq, + .rdma_destroy_srq = &qed_rdma_destroy_srq, .ll2_acquire_connection = &qed_ll2_acquire_connection, .ll2_establish_connection = &qed_ll2_establish_connection, .ll2_terminate_connection = &qed_ll2_terminate_connection, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 18ec9cbd84f5..6f722ee8ee94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -96,6 +96,8 @@ struct qed_rdma_info { u8 num_cnqs; u32 num_qps; u32 num_mrs; + u32 num_srqs; + u16 srq_id_offset; u16 queue_zone_base; u16 max_queue_zones; enum protocol_type proto; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f7122059b6b5..d8ad2dcad8d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -178,7 +178,7 @@ 0x008c80UL #define MCP_REG_SCRATCH \ 0xe20000UL -#define CNIG_REG_NW_PORT_MODE_BB_B0 \ +#define CNIG_REG_NW_PORT_MODE_BB \ 0x218200UL #define MISCS_REG_CHIP_NUM \ 0x00976cUL @@ -1621,6 +1621,7 @@ #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 #define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL #define PRS_REG_CM_HDR_GFT 0x1f11c8UL #define PRS_REG_GFT_CAM 0x1f1100UL #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 6acfd43c1a4f..b5ce1581645f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -65,6 +65,8 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { u16 icid = (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); @@ -75,11 +77,18 @@ qed_roce_async_event(struct qed_hwfn *p_hwfn, */ qed_roce_free_real_icid(p_hwfn, icid); } else { - struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events; + if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)data->rdma_data.async_handle.lo; + + events.affiliated_event(events.context, fw_event_code, + &srq_id); + } else { + union rdma_eqe_data rdata = data->rdma_data; - events->affiliated_event(p_hwfn->p_rdma_info->events.context, - fw_event_code, - (void *)&data->rdma_data.async_handle); + events.affiliated_event(events.context, fw_event_code, + (void *)&rdata.async_handle); + } } return 0; @@ -672,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, - u32 *num_invalidated_mw, u32 *cq_prod) { struct roce_destroy_qp_resp_output_params *p_ramrod_res; @@ -683,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - - *num_invalidated_mw = 0; *cq_prod = qp->cq_prod; if (!qp->resp_offloaded) { @@ -733,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw); *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); qp->cq_prod = *cq_prod; @@ -755,8 +760,7 @@ err: } static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, - struct qed_rdma_qp *qp, - u32 *num_bound_mw) + struct qed_rdma_qp *qp) { struct roce_destroy_qp_req_output_params *p_ramrod_res; struct roce_destroy_qp_req_ramrod_data *p_ramrod; @@ -798,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw); /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ dma_free_coherent(&p_hwfn->cdev->pdev->dev, @@ -959,8 +962,6 @@ err_resp: int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { - u32 num_invalidated_mw = 0; - u32 num_bound_mw = 0; u32 cq_prod; int rc; @@ -975,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) return rc; /* Send destroy requester ramrod */ - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); if (rc) return rc; - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } } return 0; @@ -1001,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, enum qed_roce_qp_state prev_state, struct qed_rdma_modify_qp_in_params *params) { - u32 num_invalidated_mw = 0, num_bound_mw = 0; int rc = 0; /* Perform additional operations according to the current state and the 
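Aside: the reworked qed_roce_async_event() above boils down to one dispatch rule: SRQ_EMPTY and SRQ_LIMIT events hand the affiliated-event callback a 16-bit SRQ id taken from the low dword of the EQE's async handle, while every other event still hands over the handle itself. A compact user-space sketch of that rule (the types and event codes are simplified stand-ins invented for this sketch, not the FW ABI definitions):

#include <stdint.h>
#include <stdio.h>

enum {				/* made-up values, for illustration only */
	EV_DESTROY_QP_DONE = 1,
	EV_SRQ_LIMIT = 2,
	EV_SRQ_EMPTY = 3,
};

struct async_handle {		/* stand-in for the handle in union rdma_eqe_data */
	uint32_t lo;
	uint32_t hi;
};

typedef void (*affiliated_event_cb)(void *context, uint8_t code, void *param);

/* Mirrors the dispatch in qed_roce_async_event(): SRQ events pass a 16-bit
 * id extracted from the handle's low dword, others pass the handle. */
static void roce_dispatch(affiliated_event_cb cb, void *ctx,
			  uint8_t code, struct async_handle *handle)
{
	if (code == EV_SRQ_EMPTY || code == EV_SRQ_LIMIT) {
		uint16_t srq_id = (uint16_t)handle->lo;

		cb(ctx, code, &srq_id);
	} else {
		cb(ctx, code, handle);
	}
}

static void on_event(void *ctx, uint8_t code, void *param)
{
	(void)ctx;
	if (code == EV_SRQ_EMPTY || code == EV_SRQ_LIMIT)
		printf("event %u on SRQ %u\n", code, *(uint16_t *)param);
}

int main(void)
{
	struct async_handle h = { .lo = 42, .hi = 0 };

	roce_dispatch(on_event, NULL, EV_SRQ_EMPTY, &h);
	return 0;
}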
@@ -1081,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, /* Send destroy responder ramrod */ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, - &num_invalidated_mw, &cq_prod); if (rc) @@ -1089,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, qp->cq_prod = cq_prod; - rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, - &num_bound_mw); - - if (num_invalidated_mw != num_bound_mw) { - DP_NOTICE(p_hwfn, - "number of invalidate memory windows is different from bounded ones\n"); - return -EINVAL; - } + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ab4ad8a1e2a5..e95431f6acd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -416,7 +416,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param p_tunn - * @param mode * @param allow_npar_tx_switch * * @return int @@ -425,7 +424,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch); + bool allow_npar_tx_switch); /** * @brief qed_sp_pf_update - PF Function Update Ramrod @@ -463,6 +462,15 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); * @return int */ +/** + * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * + * @param p_hwfn + * + * @return int + */ +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); + int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 5e927b6cac22..8de644b4721e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -306,7 +306,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_tunnel_info *p_tunn, - enum qed_mf_mode mode, bool allow_npar_tx_switch) + bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); @@ -314,7 +314,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; - u8 page_cnt; + u8 page_cnt, i; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -339,21 +339,36 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); - switch (mode) { - case QED_MF_DEFAULT: - case QED_MF_NPAR: - p_ramrod->mf_mode = MF_NPAR; - break; - case QED_MF_OVLAN: + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) p_ramrod->mf_mode = MF_OVLAN; - break; - default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + else p_ramrod->mf_mode = MF_NPAR; - } p_ramrod->outer_tag_config.outer_tag.tci = - cpu_to_le16(p_hwfn->hw_info.ovlan); + cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < 
QED_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + + /* enable_stag_pri_change should be set if port is in BD mode or + * UFP with Host Control mode. + */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + else + p_ramrod->outer_tag_config.enable_stag_pri_change = 0; + + p_ramrod->outer_tag_config.outer_tag.tci |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); + } /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, @@ -365,7 +380,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); - if (IS_MF_SI(p_hwfn)) + if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; switch (p_hwfn->hw_info.personality) { @@ -434,6 +449,39 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EOPNOTSUPP; + + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { + DP_INFO(p_hwfn, "Invalid priority type %d\n", + p_hwfn->ufp_info.pri_type); + return -EINVAL; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; + else + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + /* Set pf update ramrod command params */ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5acb91b3564c..f01bf52bc381 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -48,7 +48,7 @@ static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code); - +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { @@ -1790,7 +1790,8 @@ static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, if (!p_vf->vport_instance) return -EINVAL; - if (events & BIT(MAC_ADDR_FORCED)) { + if ((events & BIT(MAC_ADDR_FORCED)) || + p_vf->p_vf_info.is_trusted_configured) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it.
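* With the change above, a VF whose trust mode has been configured ('is_trusted_configured') takes this same path even when MAC_ADDR_FORCED is not set in 'events'.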
*/ @@ -1809,8 +1810,12 @@ "PF failed to configure MAC for VF\n"); return rc; } - - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_vf->p_vf_info.is_trusted_configured) + p_vf->configured_features |= + BIT(VFPF_BULLETIN_MAC_ADDR); + else + p_vf->configured_features |= + BIT(MAC_ADDR_FORCED); } if (events & BIT(VLAN_ADDR_FORCED)) { @@ -3170,6 +3175,10 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) return 0; + /* Don't keep track of shadow copy since we don't intend to restore. */ + if (p_vf->p_vf_info.is_trusted_configured) + return 0; + /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { @@ -3244,9 +3253,17 @@ static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, /* No real decision to make; Store the configured MAC */ if (params->type == QED_FILTER_MAC || - params->type == QED_FILTER_MAC_VLAN) + params->type == QED_FILTER_MAC_VLAN) { ether_addr_copy(vf->mac, params->mac); + if (vf->is_trusted_configured) { + qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); + + /* Update and post bulletin again */ + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + return 0; } @@ -3803,6 +3820,40 @@ static void qed_iov_get_link(struct qed_hwfn *p_hwfn, __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); } +static int +qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct vfpf_bulletin_update_mac_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; + + if (!p_vf->p_vf_info.is_trusted_configured) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Blocking bulletin update request from untrusted VF[%d]\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + rc = -EINVAL; + goto send_status; + } + + p_req = &mbx->req_virt->bulletin_update_mac; + ether_addr_copy(p_bulletin->mac, p_req->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Updated bulletin of VF[%d] with requested MAC[%pM]\n", + p_vf->abs_vf_id, p_req->mac); + +send_status: + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(struct pfvf_def_resp_tlv), status); + return rc; +} + static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid) { @@ -3882,6 +3933,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_READ: qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_BULLETIN_UPDATE_MAC: + qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -4081,16 +4135,60 @@ static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (vf_info->p_vf_info.is_trusted_configured) { + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + /* Trust mode will disable Forced MAC */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + } else { + feature = BIT(MAC_ADDR_FORCED); + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(VFPF_BULLETIN_MAC_ADDR); + } + memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; - /* Forced MAC will disable
MAC_ADDR */ - vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR); qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); } +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Can not set MAC, Forced MAC is configured\n"); + return -EINVAL; + } + + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + if (vf_info->p_vf_info.is_trusted_configured) + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); + + return 0; +} + static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, u16 pvid, int vfid) { @@ -4204,6 +4302,21 @@ out: return rc; } +static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + BIT(VFPF_BULLETIN_MAC_ADDR))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { @@ -4493,8 +4606,12 @@ static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) if (!vf_info) continue; - /* Set the forced MAC, and schedule the IOV task */ - ether_addr_copy(vf_info->forced_mac, mac); + /* Set the MAC, and schedule the IOV task */ + if (vf_info->is_trusted_configured) + ether_addr_copy(vf_info->mac, mac); + else + ether_addr_copy(vf_info->forced_mac, mac); + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); } @@ -4802,6 +4919,33 @@ static void qed_handle_vf_msg(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, + u8 *mac, + struct qed_public_vf_info *info) +{ + if (info->is_trusted_configured) { + if (is_valid_ether_addr(info->mac) && + (!mac || !ether_addr_equal(mac, info->mac))) + return true; + } else { + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) + return true; + } + + return false; +} + +static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, + struct qed_public_vf_info *info, + int vfid) +{ + if (info->is_trusted_configured) + qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); + else + qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); +} + static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) { int i; @@ -4816,18 +4960,20 @@ static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) continue; /* Update data on bulletin board */ - mac = qed_iov_bulletin_get_forced_mac(hwfn, i); - if (is_valid_ether_addr(info->forced_mac) && - (!mac || !ether_addr_equal(mac, info->forced_mac))) { + if (info->is_trusted_configured) + mac = qed_iov_bulletin_get_mac(hwfn, i); + else + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + + if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { DP_VERBOSE(hwfn, QED_MSG_IOV, "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); - /* 
Update bulletin board with forced MAC */ - qed_iov_bulletin_set_forced_mac(hwfn, - info->forced_mac, i); + /* Update bulletin board with MAC */ + qed_set_bulletin_mac(hwfn, info, i); update = true; } @@ -4867,6 +5013,72 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) qed_ptt_release(hwfn, ptt); } +static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) +{ + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 *force_mac; + int i; + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + vf = qed_iov_get_vf_info(hwfn, vf_id, true); + + if (!vf_info || !vf) + return; + + /* Force MAC converted to generic MAC in case of VF trust on */ + if (vf_info->is_trusted_configured && + (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { + force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); + + if (force_mac) { + /* Clear existing shadow copy of MAC to have a clean + * slate. + */ + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + vf_info->mac)) { + memset(vf->shadow_config.macs[i], 0, + ETH_ALEN); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", + vf_info->mac, vf_id); + break; + } + } + + ether_addr_copy(vf_info->mac, force_mac); + memset(vf_info->forced_mac, 0, ETH_ALEN); + vf->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + + /* Update shadow copy with VF MAC when trust mode is turned off */ + if (!vf_info->is_trusted_configured) { + u8 empty_mac[ETH_ALEN]; + + memset(empty_mac, 0, ETH_ALEN); + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + empty_mac)) { + ether_addr_copy(vf->shadow_config.macs[i], + vf_info->mac); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", + vf_info->mac, vf_id); + break; + } + } + /* Clear bulletin when trust mode is turned off, + * to have a clean slate for next (normal) operations. 
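+ * (qed_iov_bulletin_set_mac() below posts an all-zero MAC, and the scheduled QED_IOV_WQ_BULLETIN_UPDATE_FLAG work item pushes the refreshed bulletin to the VF.)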
+ */ + qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) { struct qed_sp_vport_update_params params; @@ -4890,6 +5102,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) continue; vf_info->is_trusted_configured = vf_info->is_trusted_request; + /* Handle forced MAC mode */ + qed_update_mac_for_vf_trust_change(hwfn, i); + /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); if (!vf->vport_instance) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 91b5e9f02a62..2d7fcd6a0777 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1375,6 +1375,35 @@ exit: } int +qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_bulletin_update_mac_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + int rc; + + if (!p_mac) + return -EINVAL; + + /* clear mailbox and prep header tlv */ + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(*p_req)); + ether_addr_copy(p_req->mac, p_mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requesting bulletin update for MAC[%pM]\n", p_mac); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 97d44dfb38ca..4f05d5eb3cf5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -518,6 +518,12 @@ struct pfvf_read_coal_resp_tlv { u8 padding[6]; }; +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -532,6 +538,7 @@ union vfpf_tlvs { struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; struct tlv_buffer_size tlv_buf_size; }; @@ -650,6 +657,7 @@ enum { CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. 
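Aside: the new vfpf_bulletin_update_mac_tlv above is a fixed-layout wire structure, so its trailing pad is load-bearing. A small user-space sketch of the layout arithmetic (the 16-byte stub for struct vfpf_first_tlv is an assumption made only so this compiles; the real definition lives earlier in qed_vf.h):

#include <assert.h>
#include <stdint.h>

#define ETH_ALEN 6

struct first_tlv_stub {		/* stand-in; real size comes from qed_vf.h */
	uint8_t opaque[16];
};

struct bulletin_update_mac_tlv {
	struct first_tlv_stub first_tlv;
	uint8_t mac[ETH_ALEN];
	uint8_t padding[2];	/* explicit pad: no compiler-inserted holes */
};

int main(void)
{
	/* PF and VF must agree on the layout byte-for-byte; the explicit
	 * 2-byte pad rounds the 6-byte MAC out to a 4-byte multiple. */
	assert(sizeof(struct bulletin_update_mac_tlv) ==
	       sizeof(struct first_tlv_stub) + ETH_ALEN + 2);
	return 0;
}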
@@ -1042,6 +1050,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_tunn); u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); +/** + * @brief - Ask PF to update the MAC address in its bulletin board + * + * @param p_mac - mac address to be updated in bulletin board + */ +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); + #else static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params) @@ -1228,6 +1243,12 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, return -EINVAL; } +static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + u8 *p_mac) +{ + return -EINVAL; +} + static inline u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 9935978c5542..d7ed0d3dbf71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -75,6 +75,7 @@ struct qede_stats_common { u64 rx_bcast_pkts; u64 mftag_filter_discards; u64 mac_filter_discards; + u64 gft_filter_drop; u64 tx_ucast_bytes; u64 tx_mcast_bytes; u64 tx_bcast_bytes; @@ -87,6 +88,7 @@ struct qede_stats_common { u64 coalesced_aborts_num; u64 non_coalesced_pkts; u64 coalesced_bytes; + u64 link_change_count; /* port */ u64 rx_64_byte_packets; @@ -290,15 +292,12 @@ struct qede_agg_info { * aggregation. */ struct sw_rx_data buffer; - dma_addr_t buffer_mapping; - struct sk_buff *skb; /* We need some structs from the start cookie until termination */ u16 vlan_tag; - u16 start_cqe_bd_len; - u8 start_cqe_placement_offset; + bool tpa_start_fail; u8 state; u8 frag_id; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index ecbf1ded7a39..f4a0f8ff8261 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -161,6 +161,7 @@ static const struct { QEDE_STAT(no_buff_discards), QEDE_PF_STAT(mftag_filter_discards), QEDE_PF_STAT(mac_filter_discards), + QEDE_PF_STAT(gft_filter_drop), QEDE_STAT(tx_err_drop_pkts), QEDE_STAT(ttl0_discard), QEDE_STAT(packet_too_big_discard), @@ -170,6 +171,8 @@ static const struct { QEDE_STAT(coalesced_aborts_num), QEDE_STAT(non_coalesced_pkts), QEDE_STAT(coalesced_bytes), + + QEDE_STAT(link_change_count), }; #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) @@ -1508,7 +1511,8 @@ static int qede_selftest_receive_traffic(struct qede_dev *edev) len = le16_to_cpu(fp_cqe->len_on_first_bd); data_ptr = (u8 *)(page_address(sw_rx_data->data) + fp_cqe->placement_offset + - sw_rx_data->page_offset); + sw_rx_data->page_offset + + rxq->rx_headroom); if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) && ether_addr_equal(data_ptr + ETH_ALEN, edev->ndev->dev_addr)) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 6687e04d1558..e9e088d9c815 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,6 +38,7 @@ #include <linux/qed/qed_if.h> #include "qede.h" +#define QEDE_FILTER_PRINT_MAX_LEN (64) struct qede_arfs_tuple { union { __be32 src_ipv4; @@ -51,6 +52,18 @@ struct qede_arfs_tuple { __be16 dst_port; __be16 eth_proto; u8 ip_proto; + + /* Describe filtering mode needed for this kind of filter */ + enum qed_filter_config_mode mode; + + /* Used to compare new/old filters.
Return true if IPs match */ + bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b); + + /* Given an address into ethhdr build a header from tuple info */ + void (*build_hdr)(struct qede_arfs_tuple *t, void *header); + + /* Stringify the tuple for a print into the provided buffer */ + void (*stringify)(struct qede_arfs_tuple *t, void *buffer); }; struct qede_arfs_fltr_node { @@ -73,9 +86,11 @@ struct qede_arfs_fltr_node { u16 sw_id; u16 rxq_id; u16 next_rxq_id; + u8 vfid; bool filter_op; bool used; u8 fw_rc; + bool b_is_drop; struct hlist_node node; }; @@ -90,7 +105,9 @@ struct qede_arfs { spinlock_t arfs_list_lock; unsigned long *arfs_fltr_bmap; int filter_count; - bool enable; + + /* Currently configured filtering mode */ + enum qed_filter_config_mode mode; }; static void qede_configure_arfs_fltr(struct qede_dev *edev, @@ -109,12 +126,22 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, params.length = n->buf_len; params.qid = rxq_id; params.b_is_add = add_fltr; + params.b_is_drop = n->b_is_drop; + + if (n->vfid) { + params.b_is_vf = true; + params.vf_id = n->vfid - 1; + } - DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, - "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", - add_fltr ? "Adding" : "Deleting", - n->flow_id, n->sw_id, ntohs(n->tuple.src_port), - ntohs(n->tuple.dst_port), rxq_id); + if (n->tuple.stringify) { + char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN]; + + n->tuple.stringify(&n->tuple, tuple_buffer); + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, + "%s sw_id[0x%x]: %s [vf %u queue %d]\n", + add_fltr ? "Adding" : "Deleting", + n->sw_id, tuple_buffer, n->vfid, rxq_id); + } n->used = true; n->filter_op = add_fltr; @@ -145,14 +172,13 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, INIT_HLIST_NODE(&fltr->node); hlist_add_head(&fltr->node, QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); - edev->arfs->filter_count++; - - if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - enum qed_filter_config_mode mode; - mode = QED_FILTER_CONFIG_MODE_5_TUPLE; - edev->ops->configure_arfs_searcher(edev->cdev, mode); - edev->arfs->enable = true; + edev->arfs->filter_count++; + if (edev->arfs->filter_count == 1 && + edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) { + edev->ops->configure_arfs_searcher(edev->cdev, + fltr->tuple.mode); + edev->arfs->mode = fltr->tuple.mode; } return 0; @@ -167,14 +193,15 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, fltr->buf_len, DMA_TO_DEVICE); qede_free_arfs_filter(edev, fltr); - edev->arfs->filter_count--; - if (!edev->arfs->filter_count && edev->arfs->enable) { + edev->arfs->filter_count--; + if (!edev->arfs->filter_count && + edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) { enum qed_filter_config_mode mode; mode = QED_FILTER_CONFIG_MODE_DISABLE; - edev->arfs->enable = false; edev->ops->configure_arfs_searcher(edev->cdev, mode); + edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE; } } @@ -264,25 +291,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) } } +#ifdef CONFIG_RFS_ACCEL spin_lock_bh(&edev->arfs->arfs_list_lock); - if (!edev->arfs->filter_count) { - if (edev->arfs->enable) { - enum qed_filter_config_mode mode; - - mode = QED_FILTER_CONFIG_MODE_DISABLE; - edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, mode); - } -#ifdef CONFIG_RFS_ACCEL - } else { + if (edev->arfs->filter_count) { set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); -#endif } 
spin_unlock_bh(&edev->arfs->arfs_list_lock); +#endif } /* This function waits until all aRFS filters get deleted and freed. @@ -512,6 +531,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, eth->h_proto = skb->protocol; n->tuple.eth_proto = skb->protocol; n->tuple.ip_proto = ip_proto; + n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); @@ -550,8 +570,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced) __qede_lock(edev); - /* MAC hints take effect only if we haven't set one already */ - if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced) { + if (!is_valid_ether_addr(mac)) { __qede_unlock(edev); return; } @@ -1161,6 +1180,10 @@ int qede_set_mac_addr(struct net_device *ndev, void *p) if (edev->state != QEDE_STATE_OPEN) { DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "The device is currently down\n"); + /* Ask PF to explicitly update a copy in bulletin board */ + if (IS_VF(edev) && edev->ops->req_bulletin_update_mac) + edev->ops->req_bulletin_update_mac(edev->cdev, + ndev->dev_addr); goto out; } @@ -1336,38 +1359,6 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) return NULL; } -static bool -qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos, - struct ethtool_rx_flow_spec *fsp, - __be16 proto) -{ - if (proto == htons(ETH_P_IP)) { - struct ethtool_tcpip4_spec *ip; - - ip = &fsp->h_u.tcp_ip4_spec; - - if (tpos->tuple.src_ipv4 == ip->ip4src && - tpos->tuple.dst_ipv4 == ip->ip4dst) - return true; - else - return false; - } else { - struct ethtool_tcpip6_spec *ip6; - struct in6_addr *src; - - ip6 = &fsp->h_u.tcp_ip6_spec; - src = &tpos->tuple.src_ipv6; - - if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) && - !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst, - sizeof(struct in6_addr))) - return true; - else - return false; - } - return false; -} - int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs) { @@ -1452,102 +1443,444 @@ int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) fsp->ring_cookie = fltr->rxq_id; + if (fltr->vfid) { + fsp->ring_cookie |= ((u64)fltr->vfid) << + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + } + + if (fltr->b_is_drop) + fsp->ring_cookie = RX_CLS_FLOW_DISC; unlock: __qede_unlock(edev); return rc; } static int -qede_validate_and_check_flow_exist(struct qede_dev *edev, - struct ethtool_rx_flow_spec *fsp, - int *min_hlen) +qede_poll_arfs_filter_config(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) { - __be16 src_port = 0x0, dst_port = 0x0; - struct qede_arfs_fltr_node *fltr; - struct hlist_node *temp; - struct hlist_head *head; - __be16 eth_proto; - u8 ip_proto; + int count = QEDE_ARFS_POLL_COUNT; - if (fsp->location >= QEDE_RFS_MAX_FLTR || - fsp->ring_cookie >= QEDE_RSS_COUNT(edev)) - return -EINVAL; + while (fltr->used && count) { + msleep(20); + count--; + } + + if (count == 0 || fltr->fw_rc) { + DP_NOTICE(edev, "Timeout in polling filter config\n"); + qede_dequeue_fltr_and_config_searcher(edev, fltr); + return -EIO; + } + + return fltr->fw_rc; +} + +static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t) +{ + int size = ETH_HLEN; + + if (t->eth_proto == htons(ETH_P_IP)) + size += sizeof(struct iphdr); + else + size += sizeof(struct ipv6hdr); + + if (t->ip_proto == IPPROTO_TCP) + size += sizeof(struct tcphdr); + else + size += sizeof(struct udphdr); + + return size; +} + +static bool qede_flow_spec_ipv4_cmp(struct 
qede_arfs_tuple *a, + struct qede_arfs_tuple *b) +{ + if (a->eth_proto != htons(ETH_P_IP) || + b->eth_proto != htons(ETH_P_IP)) + return false; + + return (a->src_ipv4 == b->src_ipv4) && + (a->dst_ipv4 == b->dst_ipv4); +} + +static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t, + void *header) +{ + __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr)); + struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN); + struct ethhdr *eth = (struct ethhdr *)header; + + eth->h_proto = t->eth_proto; + ip->saddr = t->src_ipv4; + ip->daddr = t->dst_ipv4; + ip->version = 0x4; + ip->ihl = 0x5; + ip->protocol = t->ip_proto; + ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN); + + /* ports is weakly typed to suit both TCP and UDP ports */ + ports[0] = t->src_port; + ports[1] = t->dst_port; +} + +static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t, + void *buffer) +{ + const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP"; + + snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN, + "%s %pI4 (%04x) -> %pI4 (%04x)", + prefix, &t->src_ipv4, t->src_port, + &t->dst_ipv4, t->dst_port); +} + +static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a, + struct qede_arfs_tuple *b) +{ + if (a->eth_proto != htons(ETH_P_IPV6) || + b->eth_proto != htons(ETH_P_IPV6)) + return false; + + if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr))) + return false; - if (fsp->flow_type == TCP_V4_FLOW) { - *min_hlen += sizeof(struct iphdr) + - sizeof(struct tcphdr); - eth_proto = htons(ETH_P_IP); - ip_proto = IPPROTO_TCP; - } else if (fsp->flow_type == UDP_V4_FLOW) { - *min_hlen += sizeof(struct iphdr) + - sizeof(struct udphdr); - eth_proto = htons(ETH_P_IP); - ip_proto = IPPROTO_UDP; - } else if (fsp->flow_type == TCP_V6_FLOW) { - *min_hlen += sizeof(struct ipv6hdr) + - sizeof(struct tcphdr); - eth_proto = htons(ETH_P_IPV6); - ip_proto = IPPROTO_TCP; - } else if (fsp->flow_type == UDP_V6_FLOW) { - *min_hlen += sizeof(struct ipv6hdr) + - sizeof(struct udphdr); - eth_proto = htons(ETH_P_IPV6); - ip_proto = IPPROTO_UDP; + if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr))) + return false; + + return true; +} + +static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t, + void *header) +{ + __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr)); + struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN); + struct ethhdr *eth = (struct ethhdr *)header; + + eth->h_proto = t->eth_proto; + memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr)); + memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr)); + ip6->version = 0x6; + + if (t->ip_proto == IPPROTO_TCP) { + ip6->nexthdr = NEXTHDR_TCP; + ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); } else { - DP_NOTICE(edev, "Unsupported flow type = 0x%x\n", - fsp->flow_type); - return -EPROTONOSUPPORT; + ip6->nexthdr = NEXTHDR_UDP; + ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); } - if (eth_proto == htons(ETH_P_IP)) { - src_port = fsp->h_u.tcp_ip4_spec.psrc; - dst_port = fsp->h_u.tcp_ip4_spec.pdst; + /* ports is weakly typed to suit both TCP and UDP ports */ + ports[0] = t->src_port; + ports[1] = t->dst_port; +} + +/* Validate fields which are set and not accepted by the driver */ +static int qede_flow_spec_validate_unused(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fs) +{ + if (fs->flow_type & FLOW_MAC_EXT) { + DP_INFO(edev, "Don't support MAC extensions\n"); + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.vlan_etype || 
fs->h_ext.vlan_tci)) { + DP_INFO(edev, "Don't support vlan-based classification\n"); + return -EOPNOTSUPP; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->h_ext.data[0] || fs->h_ext.data[1])) { + DP_INFO(edev, "Don't support user defined data\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + if ((fs->h_u.tcp_ip4_spec.ip4src & + fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) { + DP_INFO(edev, "Don't support IP-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.ip4dst & + fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) { + DP_INFO(edev, "Don't support IP-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.psrc & + fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip4_spec.pdst & + fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if (fs->h_u.tcp_ip4_spec.tos) { + DP_INFO(edev, "Don't support tos\n"); + return -EOPNOTSUPP; + } + + t->eth_proto = htons(ETH_P_IP); + t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; + t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; + t->src_port = fs->h_u.tcp_ip4_spec.psrc; + t->dst_port = fs->h_u.tcp_ip4_spec.pdst; + + /* We must either have a valid 4-tuple or only dst port + * or only src ip as an input + */ + if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !t->src_ipv4 && !t->dst_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !t->dst_ipv4 && t->src_ipv4) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; } else { - src_port = fsp->h_u.tcp_ip6_spec.psrc; - dst_port = fsp->h_u.tcp_ip6_spec.pdst; + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; } - head = QEDE_ARFS_BUCKET_HEAD(edev, 0); - hlist_for_each_entry_safe(fltr, temp, head, node) { - if ((fltr->tuple.ip_proto == ip_proto && - fltr->tuple.eth_proto == eth_proto && - qede_compare_user_flow_ips(fltr, fsp, eth_proto) && - fltr->tuple.src_port == src_port && - fltr->tuple.dst_port == dst_port) || - fltr->sw_id == fsp->location) - return -EEXIST; + t->ip_comp = qede_flow_spec_ipv4_cmp; + t->build_hdr = qede_flow_build_ipv4_hdr; + t->stringify = qede_flow_stringify_ipv4_hdr; + + return 0; +} + +static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_TCP; + + if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_UDP; + + if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + struct in6_addr zero_addr; + void *p; + + p = &zero_addr; + memset(p, 0, sizeof(zero_addr)); + + if ((fs->h_u.tcp_ip6_spec.psrc & + fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if ((fs->h_u.tcp_ip6_spec.pdst & + fs->m_u.tcp_ip6_spec.pdst) != 
fs->h_u.tcp_ip6_spec.pdst) { + DP_INFO(edev, "Don't support port-masks\n"); + return -EOPNOTSUPP; + } + + if (fs->h_u.tcp_ip6_spec.tclass) { + DP_INFO(edev, "Don't support tclass\n"); + return -EOPNOTSUPP; + } + t->eth_proto = htons(ETH_P_IPV6); + memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + t->src_port = fs->h_u.tcp_ip6_spec.psrc; + t->dst_port = fs->h_u.tcp_ip6_spec.pdst; + + /* We must make sure we have a valid 4-tuple or only dest port + * or only src ip as an input + */ + if (t->src_port && t->dst_port && + memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && + memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + } else if (!t->src_port && t->dst_port && + !memcmp(&t->src_ipv6, p, sizeof(struct in6_addr)) && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_L4_PORT; + } else if (!t->src_port && !t->dst_port && + !memcmp(&t->dst_ipv6, p, sizeof(struct in6_addr)) && + memcmp(&t->src_ipv6, p, sizeof(struct in6_addr))) { + t->mode = QED_FILTER_CONFIG_MODE_IP_SRC; + } else { + DP_INFO(edev, "Invalid N-tuple\n"); + return -EOPNOTSUPP; + } + + t->ip_comp = qede_flow_spec_ipv6_cmp; + t->build_hdr = qede_flow_build_ipv6_hdr; + + return 0; } -static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) { + t->ip_proto = IPPROTO_TCP; + if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + t->ip_proto = IPPROTO_UDP; + + if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_tuple(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + memset(t, 0, sizeof(*t)); + + if (qede_flow_spec_validate_unused(edev, fs)) + return -EOPNOTSUPP; + + switch ((fs->flow_type & ~FLOW_EXT)) { + case TCP_V4_FLOW: + return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); + case UDP_V4_FLOW: + return qede_flow_spec_to_tuple_udpv4(edev, t, fs); + case TCP_V6_FLOW: + return qede_flow_spec_to_tuple_tcpv6(edev, t, fs); + case UDP_V6_FLOW: + return qede_flow_spec_to_tuple_udpv6(edev, t, fs); + default: + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Can't support flow of type %08x\n", fs->flow_type); + return -EOPNOTSUPP; + } + + return 0; +} + +static int qede_flow_spec_validate(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fs, + struct qede_arfs_tuple *t) +{ + if (fs->location >= QEDE_RFS_MAX_FLTR) { + DP_INFO(edev, "Location out-of-bounds\n"); + return -EINVAL; + } + + /* Check location isn't already in use */ + if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) { + DP_INFO(edev, "Location already in use\n"); + return -EINVAL; + } + + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->mode); +
return -EINVAL; + } + + /* If drop requested then no need to validate other data */ + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + return 0; + + if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) + return 0; + + if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { + DP_INFO(edev, "Queue out-of-bounds\n"); + return -EINVAL; + } + + return 0; +} + +/* Must be called while qede lock is held */ +static struct qede_arfs_fltr_node * +qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) +{ + struct qede_arfs_fltr_node *fltr; + struct hlist_node *temp; + struct hlist_head *head; + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + + hlist_for_each_entry_safe(fltr, temp, head, node) { + if (fltr->tuple.ip_proto == t->ip_proto && + fltr->tuple.src_port == t->src_port && + fltr->tuple.dst_port == t->dst_port && + t->ip_comp(&fltr->tuple, t)) + return fltr; + } + + return NULL; +} + +static void qede_flow_set_destination(struct qede_dev *edev, + struct qede_arfs_fltr_node *n, + struct ethtool_rx_flow_spec *fs) +{ + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + n->b_is_drop = true; + return; + } + + n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie); + n->next_rxq_id = n->rxq_id; + + if (n->vfid) + DP_VERBOSE(edev, QED_MSG_SP, + "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); } int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) { struct ethtool_rx_flow_spec *fsp = &info->fs; struct qede_arfs_fltr_node *n; - int min_hlen = ETH_HLEN, rc; - struct ethhdr *eth; - struct iphdr *ip; - __be16 *ports; + struct qede_arfs_tuple t; + int min_hlen, rc; __qede_lock(edev); @@ -1556,16 +1889,28 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) goto unlock; } - rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen); + /* Translate the flow specification into something fitting our DB */ + rc = qede_flow_spec_to_tuple(edev, &t, fsp); if (rc) goto unlock; + /* Make sure location is valid and filter isn't already set */ + rc = qede_flow_spec_validate(edev, fsp, &t); + if (rc) + goto unlock; + + if (qede_flow_find_fltr(edev, &t)) { + rc = -EINVAL; + goto unlock; + } + n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) { rc = -ENOMEM; goto unlock; } + min_hlen = qede_flow_get_min_header_size(&t); n->data = kzalloc(min_hlen, GFP_KERNEL); if (!n->data) { kfree(n); @@ -1576,68 +1921,13 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) n->sw_id = fsp->location; set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); n->buf_len = min_hlen; - n->rxq_id = fsp->ring_cookie; - n->next_rxq_id = n->rxq_id; - eth = (struct ethhdr *)n->data; - if (info->fs.flow_type == TCP_V4_FLOW || - info->fs.flow_type == UDP_V4_FLOW) { - ports = (__be16 *)(n->data + ETH_HLEN + - sizeof(struct iphdr)); - eth->h_proto = htons(ETH_P_IP); - n->tuple.eth_proto = htons(ETH_P_IP); - n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src; - n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst; - n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc; - n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst; - ports[0] = n->tuple.src_port; - ports[1] = n->tuple.dst_port; - ip = (struct iphdr *)(n->data + ETH_HLEN); - ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src; - ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst; - ip->version = 0x4; - ip->ihl = 0x5; - - if (info->fs.flow_type == TCP_V4_FLOW) { - n->tuple.ip_proto = IPPROTO_TCP; - ip->protocol = IPPROTO_TCP; - } else { - n->tuple.ip_proto = IPPROTO_UDP; - ip->protocol = IPPROTO_UDP; - } -
ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN); - } else { - struct ipv6hdr *ip6; - - ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN); - ports = (__be16 *)(n->data + ETH_HLEN + - sizeof(struct ipv6hdr)); - eth->h_proto = htons(ETH_P_IPV6); - n->tuple.eth_proto = htons(ETH_P_IPV6); - memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src, - sizeof(struct in6_addr)); - memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst, - sizeof(struct in6_addr)); - n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc; - n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst; - ports[0] = n->tuple.src_port; - ports[1] = n->tuple.dst_port; - memcpy(&ip6->saddr, &n->tuple.src_ipv6, - sizeof(struct in6_addr)); - memcpy(&ip6->daddr, &n->tuple.dst_ipv6, - sizeof(struct in6_addr)); - ip6->version = 0x6; + memcpy(&n->tuple, &t, sizeof(n->tuple)); - if (info->fs.flow_type == TCP_V6_FLOW) { - n->tuple.ip_proto = IPPROTO_TCP; - ip6->nexthdr = NEXTHDR_TCP; - ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); - } else { - n->tuple.ip_proto = IPPROTO_UDP; - ip6->nexthdr = NEXTHDR_UDP; - ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); - } - } + qede_flow_set_destination(edev, n, fsp); + + /* Build a minimal header according to the flow */ + n->tuple.build_hdr(&n->tuple, n->data); rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); if (rc) @@ -1647,6 +1937,7 @@ int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) rc = qede_poll_arfs_filter_config(edev, n); unlock: __qede_unlock(edev); + return rc; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 14941303189d..6c702399b801 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -660,7 +660,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev, /* Add one frag and update the appropriate fields in the skb */ skb_fill_page_desc(skb, tpa_info->frag_id++, - current_bd->data, current_bd->page_offset, + current_bd->data, + current_bd->page_offset + rxq->rx_headroom, len_on_bd); if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) { @@ -671,8 +672,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev, goto out; } - qed_chain_consume(&rxq->rx_bd_ring); - rxq->sw_rx_cons++; + qede_rx_bd_ring_consume(rxq); skb->data_len += len_on_bd; skb->truesize += rxq->rx_buf_seg_size; @@ -721,64 +721,129 @@ static u8 qede_check_tunn_csum(u16 flag) return QEDE_CSUM_UNNECESSARY | tcsum; } +static inline struct sk_buff * +qede_build_skb(struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad) +{ + struct sk_buff *skb; + void *buf; + + buf = page_address(bd->data) + bd->page_offset; + skb = build_skb(buf, rxq->rx_buf_seg_size); + + skb_reserve(skb, pad); + skb_put(skb, len); + + return skb; +} + +static struct sk_buff * +qede_tpa_rx_build_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad, + bool alloc_skb) +{ + struct sk_buff *skb; + + skb = qede_build_skb(rxq, bd, len, pad); + bd->page_offset += rxq->rx_buf_seg_size; + + if (bd->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { + DP_NOTICE(edev, + "Failed to allocate RX buffer for tpa start\n"); + bd->page_offset -= rxq->rx_buf_seg_size; + page_ref_inc(bd->data); + dev_kfree_skb_any(skb); + return NULL; + } + } else { + page_ref_inc(bd->data); + qede_reuse_page(rxq, bd); + } + + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + + return skb; +} + +static struct 
sk_buff * +qede_rx_build_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *bd, u16 len, u16 pad) +{ + struct sk_buff *skb = NULL; + + /* For smaller frames we still need to allocate an skb and memcpy + * the data; the benefit is reusing the page segment instead of + * un-mapping it. + */ + if ((len + pad <= edev->rx_copybreak)) { + unsigned int offset = bd->page_offset + pad; + + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, pad); + memcpy(skb_put(skb, len), + page_address(bd->data) + offset, len); + qede_reuse_page(rxq, bd); + goto out; + } + + skb = qede_build_skb(rxq, bd, len, pad); + + if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { + /* Incr page ref count to reuse on allocation failure so + * that it doesn't get freed while freeing SKB [as it's + * already mapped there]. + */ + page_ref_inc(bd->data); + dev_kfree_skb_any(skb); + return NULL; + } +out: + /* We've consumed the first BD and prepared an SKB */ + qede_rx_bd_ring_consume(rxq); + + return skb; +} + static void qede_tpa_start(struct qede_dev *edev, struct qede_rx_queue *rxq, struct eth_fast_path_rx_tpa_start_cqe *cqe) { struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); - struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *replace_buf = &tpa_info->buffer; - dma_addr_t mapping = tpa_info->buffer_mapping; struct sw_rx_data *sw_rx_data_cons; - struct sw_rx_data *sw_rx_data_prod; + u16 pad; sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + pad = cqe->placement_offset + rxq->rx_headroom; - /* Use pre-allocated replacement buffer - we can't release the agg. - * start until its over and we don't want to risk allocation failing - * here, so re-allocate when aggregation will be over. - */ - sw_rx_data_prod->mapping = replace_buf->mapping; - - sw_rx_data_prod->data = replace_buf->data; - rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); - rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); - sw_rx_data_prod->page_offset = replace_buf->page_offset; - - rxq->sw_rx_prod++; + tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, + le16_to_cpu(cqe->len_on_first_bd), + pad, false); + tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; + tpa_info->buffer.mapping = sw_rx_data_cons->mapping; - /* move partial skb from cons to pool (don't unmap yet) - * save mapping, incase we drop the packet later on. - */ - tpa_info->buffer = *sw_rx_data_cons; - mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), - le32_to_cpu(rx_bd_cons->addr.lo)); - - tpa_info->buffer_mapping = mapping; - rxq->sw_rx_cons++; - - /* set tpa state to start only if we are able to allocate skb - * for this aggregation, otherwise mark as error and aggregation will - * be dropped - */ - tpa_info->skb = netdev_alloc_skb(edev->ndev, - le16_to_cpu(cqe->len_on_first_bd)); if (unlikely(!tpa_info->skb)) { DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); + + /* Consume from ring but do not produce since + * this might still be used by FW; it will be re-used + * at TPA end. 
+ */ + tpa_info->tpa_start_fail = true; + qede_rx_bd_ring_consume(rxq); tpa_info->state = QEDE_AGG_STATE_ERROR; goto cons_buf; } - /* Start filling in the aggregation info */ - skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); tpa_info->frag_id = 0; tpa_info->state = QEDE_AGG_STATE_START; - /* Store some information from first CQE */ - tpa_info->start_cqe_placement_offset = cqe->placement_offset; - tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd); if ((le16_to_cpu(cqe->pars_flags.flags) >> PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) @@ -899,6 +964,10 @@ static int qede_tpa_end(struct qede_dev *edev, tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; skb = tpa_info->skb; + if (tpa_info->buffer.page_offset == PAGE_SIZE) + dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, + PAGE_SIZE, rxq->data_direction); + for (i = 0; cqe->len_list[i]; i++) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->len_list[i])); @@ -919,11 +988,6 @@ static int qede_tpa_end(struct qede_dev *edev, "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", le16_to_cpu(cqe->total_packet_len), skb->len); - memcpy(skb->data, - page_address(tpa_info->buffer.data) + - tpa_info->start_cqe_placement_offset + - tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len); - /* Finalize the SKB */ skb->protocol = eth_type_trans(skb, edev->ndev); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -940,6 +1004,12 @@ static int qede_tpa_end(struct qede_dev *edev, return 1; err: tpa_info->state = QEDE_AGG_STATE_NONE; + + if (tpa_info->tpa_start_fail) { + qede_reuse_page(rxq, &tpa_info->buffer); + tpa_info->tpa_start_fail = false; + } + dev_kfree_skb_any(tpa_info->skb); tpa_info->skb = NULL; return 0; @@ -1058,65 +1128,6 @@ static bool qede_rx_xdp(struct qede_dev *edev, return false; } -static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev, - struct qede_rx_queue *rxq, - struct sw_rx_data *bd, u16 len, - u16 pad) -{ - unsigned int offset = bd->page_offset + pad; - struct skb_frag_struct *frag; - struct page *page = bd->data; - unsigned int pull_len; - struct sk_buff *skb; - unsigned char *va; - - /* Allocate a new SKB with a sufficient large header len */ - skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); - if (unlikely(!skb)) - return NULL; - - /* Copy data into SKB - if it's small, we can simply copy it and - * re-use the already allcoated & mapped memory. - */ - if (len + pad <= edev->rx_copybreak) { - skb_put_data(skb, page_address(page) + offset, len); - qede_reuse_page(rxq, bd); - goto out; - } - - frag = &skb_shinfo(skb)->frags[0]; - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, offset, len, rxq->rx_buf_seg_size); - - va = skb_frag_address(frag); - pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); - - /* Align the pull_len to optimize memcpy */ - memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); - - /* Correct the skb & frag sizes offset after the pull */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - if (unlikely(qede_realloc_rx_buffer(rxq, bd))) { - /* Incr page ref count to reuse on allocation failure so - * that it doesn't get freed while freeing SKB [as its - * already mapped there]. 
- */ - page_ref_inc(page); - dev_kfree_skb_any(skb); - return NULL; - } - -out: - /* We've consumed the first BD and prepared an SKB */ - qede_rx_bd_ring_consume(rxq); - return skb; -} - static int qede_rx_build_jumbo(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sk_buff *skb, @@ -1157,7 +1168,7 @@ static int qede_rx_build_jumbo(struct qede_dev *edev, PAGE_SIZE, DMA_FROM_DEVICE); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, - bd->data, 0, cur_size); + bd->data, rxq->rx_headroom, cur_size); skb->truesize += PAGE_SIZE; skb->data_len += cur_size; @@ -1256,7 +1267,7 @@ static int qede_rx_process_cqe(struct qede_dev *edev, /* Basic validation passed; Need to prepare an SKB. This would also * guarantee to finally consume the first BD upon success. */ - skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad); + skb = qede_rx_build_skb(edev, rxq, bd, len, pad); if (!skb) { rxq->rx_alloc_errors++; qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f6655e251bbd..6a796040a32c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -133,6 +133,9 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +static void qede_get_eth_tlv_data(void *edev, void *data); +static void qede_get_generic_tlv_data(void *edev, + struct qed_generic_tlvs *data); /* The qede lock is used to protect driver state change and driver flows that * are not reentrant. @@ -199,7 +202,7 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) /* Enable/Disable Tx switching for PF */ if ((rc == num_vfs_param) && netif_running(edev->ndev) && - qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { + !qed_info->b_inter_pf_switch && qed_info->tx_switching) { vport_params->vport_id = 0; vport_params->update_tx_switching_flg = 1; vport_params->tx_switching_flg = num_vfs_param ? 
1 : 0; @@ -228,6 +231,8 @@ static struct qed_eth_cb_ops qede_ll_ops = { .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, + .get_generic_tlv_data = qede_get_generic_tlv_data, + .get_protocol_tlv_data = qede_get_eth_tlv_data, }, .force_mac = qede_force_mac, .ports_update = qede_udp_ports_update, @@ -342,6 +347,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts; p_common->mftag_filter_discards = stats.common.mftag_filter_discards; p_common->mac_filter_discards = stats.common.mac_filter_discards; + p_common->gft_filter_drop = stats.common.gft_filter_drop; p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes; p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes; @@ -393,6 +399,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) p_common->brb_truncates = stats.common.brb_truncates; p_common->brb_discards = stats.common.brb_discards; p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames; + p_common->link_change_count = stats.common.link_change_count; if (QEDE_IS_BB(edev)) { struct qede_stats_bb *p_bb = &edev->stats.bb; @@ -1196,30 +1203,8 @@ static void qede_free_rx_buffers(struct qede_dev *edev, } } -static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) -{ - int i; - - if (edev->gro_disable) - return; - - for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { - struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->buffer; - - if (replace_buf->data) { - dma_unmap_page(&edev->pdev->dev, - replace_buf->mapping, - PAGE_SIZE, DMA_FROM_DEVICE); - __free_page(replace_buf->data); - } - } -} - static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { - qede_free_sge_mem(edev, rxq); - /* Free rx buffers */ qede_free_rx_buffers(edev, rxq); @@ -1231,45 +1216,15 @@ static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring); } -static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) +static void qede_set_tpa_param(struct qede_rx_queue *rxq) { - dma_addr_t mapping; int i; - if (edev->gro_disable) - return 0; - for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; - struct sw_rx_data *replace_buf = &tpa_info->buffer; - - replace_buf->data = alloc_pages(GFP_ATOMIC, 0); - if (unlikely(!replace_buf->data)) { - DP_NOTICE(edev, - "Failed to allocate TPA skb pool [replacement buffer]\n"); - goto err; - } - - mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - DP_NOTICE(edev, - "Failed to map TPA replacement buffer\n"); - goto err; - } - replace_buf->mapping = mapping; - tpa_info->buffer.page_offset = 0; - tpa_info->buffer_mapping = mapping; tpa_info->state = QEDE_AGG_STATE_NONE; } - - return 0; -err: - qede_free_sge_mem(edev, rxq); - edev->gro_disable = 1; - edev->ndev->features &= ~NETIF_F_GRO_HW; - return -ENOMEM; } /* This function allocates all memory needed per Rx queue */ @@ -1280,19 +1235,24 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) rxq->num_rx_buffers = edev->q_num_rx_buffers; rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu; - rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0; + + rxq->rx_headroom = edev->xdp_prog ? 
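The allocation hunk continuing below sizes each receive buffer as headroom plus payload plus the skb_shared_info tail that build_skb() expects at the end of the fragment, then rounds the whole segment up to a power of two so that several buffers tile one page. The arithmetic, worked through in a standalone sketch (the constants are illustrative stand-ins for NET_SKB_PAD, sizeof(struct skb_shared_info) and ETH_OVERHEAD):

#include <stdio.h>

#define PAGE_SZ         4096u
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1u))

static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int headroom = 32;                     /* NET_SKB_PAD stand-in */
        unsigned int shinfo = ALIGN_UP(320, 64);        /* skb_shared_info, cacheline-aligned */
        unsigned int buf = 14 + 8 + 1500;               /* eth + overhead + MTU, illustrative */
        unsigned int overhead = headroom + shinfo;
        unsigned int seg;

        if (buf + overhead > PAGE_SZ)                   /* keep one buffer within a page */
                buf = PAGE_SZ - overhead;

        seg = roundup_pow_of_two(overhead + buf);       /* non-XDP case */
        printf("segment %u -> %u buffers per page\n", seg, PAGE_SZ / seg);
        return 0;
}

With these numbers the segment rounds up to 2048, so two buffers share each 4 KiB page.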
XDP_PACKET_HEADROOM : NET_SKB_PAD; + size = rxq->rx_headroom + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); /* Make sure that the headroom and payload fit in a single page */ - if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE) - rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom; + if (rxq->rx_buf_size + size > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE - size; - /* Segment size to spilt a page in multiple equal parts, + /* Segment size to split a page in multiple equal parts, * unless XDP is used in which case we'd use the entire page. */ - if (!edev->xdp_prog) - rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); - else + if (!edev->xdp_prog) { + size = size + rxq->rx_buf_size; + rxq->rx_buf_seg_size = roundup_pow_of_two(size); + } else { rxq->rx_buf_seg_size = PAGE_SIZE; + } /* Allocate the parallel driver ring for Rx buffers */ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; @@ -1336,7 +1296,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } } - rc = qede_alloc_sge_mem(edev, rxq); + if (!edev->gro_disable) + qede_set_tpa_param(rxq); err: return rc; } @@ -1927,7 +1888,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) vport_update_params->update_vport_active_flg = 1; vport_update_params->vport_active_flg = 1; - if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && + if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) && qed_info->tx_switching) { vport_update_params->update_tx_switching_flg = 1; vport_update_params->tx_switching_flg = 1; @@ -2177,3 +2138,99 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } } } + +static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) +{ + struct netdev_queue *netdev_txq; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index); + if (netif_xmit_stopped(netdev_txq)) + return true; + + return false; +} + +static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qede_dev *edev = dev; + struct netdev_hw_addr *ha; + int i; + + if (edev->ndev->features & NETIF_F_IP_CSUM) + data->feat_flags |= QED_TLV_IP_CSUM; + if (edev->ndev->features & NETIF_F_TSO) + data->feat_flags |= QED_TLV_LSO; + + ether_addr_copy(data->mac[0], edev->ndev->dev_addr); + memset(data->mac[1], 0, ETH_ALEN); + memset(data->mac[2], 0, ETH_ALEN); + /* Copy the first two UC macs */ + netif_addr_lock_bh(edev->ndev); + i = 1; + netdev_for_each_uc_addr(ha, edev->ndev) { + ether_addr_copy(data->mac[i++], ha->addr); + if (i == QED_TLV_MAC_COUNT) + break; + } + + netif_addr_unlock_bh(edev->ndev); +} + +static void qede_get_eth_tlv_data(void *dev, void *data) +{ + struct qed_mfw_tlv_eth *etlv = data; + struct qede_dev *edev = dev; + struct qede_fastpath *fp; + int i; + + etlv->lso_maxoff_size = 0xFFFF; + etlv->lso_maxoff_size_set = true; + etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN; + etlv->lso_minseg_size_set = true; + etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC); + etlv->prom_mode_set = true; + etlv->tx_descr_size = QEDE_TSS_COUNT(edev); + etlv->tx_descr_size_set = true; + etlv->rx_descr_size = QEDE_RSS_COUNT(edev); + etlv->rx_descr_size_set = true; + etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB; + etlv->iov_offload_set = true; + + /* Fill information regarding queues; should be done under the qede + * lock to guarantee those don't change beneath our feet. 
+ */ + etlv->txqs_empty = true; + etlv->rxqs_empty = true; + etlv->num_txqs_full = 0; + etlv->num_rxqs_full = 0; + + __qede_lock(edev); + for_each_queue(i) { + fp = &edev->fp_array[i]; + if (fp->type & QEDE_FASTPATH_TX) { + if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod) + etlv->txqs_empty = false; + if (qede_is_txq_full(edev, fp->txq)) + etlv->num_txqs_full++; + } + if (fp->type & QEDE_FASTPATH_RX) { + if (qede_has_rx_work(fp->rxq)) + etlv->rxqs_empty = false; + + /* This one is a bit tricky; Firmware might stop + * placing packets if ring is not yet full. + * Give an approximation. + */ + if (le16_to_cpu(*fp->rxq->hw_cons_ptr) - + qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) > + RX_RING_SIZE - 100) + etlv->num_rxqs_full++; + } + } + __qede_unlock(edev); + + etlv->txqs_empty_set = true; + etlv->rxqs_empty_set = true; + etlv->num_txqs_full_set = true; + etlv->num_rxqs_full_set = true; +} diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 8293c2028002..70de062b72a1 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -2211,7 +2211,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; @@ -2258,7 +2258,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) while (prod != rx_ring->cnsmr_idx) { netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, - "cq_id = %d, prod = %d, cnsmr = %d.\n.", + "cq_id = %d, prod = %d, cnsmr = %d\n", rx_ring->cq_id, prod, rx_ring->cnsmr_idx); net_rsp = rx_ring->curr_entry; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index d5a32b7c7dc5..031f6e6ee9c1 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -683,10 +683,11 @@ static int emac_tx_q_desc_alloc(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) { struct emac_ring_header *ring_header = &adpt->ring_header; + int node = dev_to_node(adpt->netdev->dev.parent); size_t size; size = sizeof(struct emac_buffer) * tx_q->tpd.count; - tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL); + tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node); if (!tx_q->tpd.tpbuff) return -ENOMEM; @@ -723,11 +724,12 @@ static void emac_rx_q_bufs_free(struct emac_adapter *adpt) static int emac_rx_descs_alloc(struct emac_adapter *adpt) { struct emac_ring_header *ring_header = &adpt->ring_header; + int node = dev_to_node(adpt->netdev->dev.parent); struct emac_rx_queue *rx_q = &adpt->rx_q; size_t size; size = sizeof(struct emac_buffer) * rx_q->rfd.count; - rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL); + rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node); if (!rx_q->rfd.rfbuff) return -ENOMEM; @@ -920,14 +922,13 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, static void emac_adjust_link(struct net_device *netdev) { struct emac_adapter *adpt = netdev_priv(netdev); - struct emac_sgmii *sgmii = &adpt->phy; struct phy_device *phydev = netdev->phydev; if (phydev->link) { emac_mac_start(adpt); - sgmii->link_up(adpt); + emac_sgmii_link_change(adpt, true); } else { - sgmii->link_down(adpt); + emac_sgmii_link_change(adpt, false); emac_mac_stop(adpt); } diff --git 
a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index e8ab512ee7e3..e78e5db39458 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -53,6 +53,46 @@ #define SERDES_START_WAIT_TIMES 100 +int emac_sgmii_init(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->init)) + return 0; + + return adpt->phy.sgmii_ops->init(adpt); +} + +int emac_sgmii_open(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->open)) + return 0; + + return adpt->phy.sgmii_ops->open(adpt); +} + +void emac_sgmii_close(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->close)) + return; + + adpt->phy.sgmii_ops->close(adpt); +} + +int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->link_change)) + return 0; + + return adpt->phy.sgmii_ops->link_change(adpt, link_state); +} + +void emac_sgmii_reset(struct emac_adapter *adpt) +{ + if (!(adpt->phy.sgmii_ops && adpt->phy.sgmii_ops->reset)) + return; + + adpt->phy.sgmii_ops->reset(adpt); +} + /* Initialize the SGMII link between the internal and external PHYs. */ static void emac_sgmii_link_init(struct emac_adapter *adpt) { @@ -163,21 +203,21 @@ static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) msleep(50); } -void emac_sgmii_reset(struct emac_adapter *adpt) +static void emac_sgmii_common_reset(struct emac_adapter *adpt) { int ret; emac_sgmii_reset_prepare(adpt); emac_sgmii_link_init(adpt); - ret = adpt->phy.initialize(adpt); + ret = emac_sgmii_init(adpt); if (ret) netdev_err(adpt->netdev, "could not reinitialize internal PHY (error=%i)\n", ret); } -static int emac_sgmii_open(struct emac_adapter *adpt) +static int emac_sgmii_common_open(struct emac_adapter *adpt) { struct emac_sgmii *sgmii = &adpt->phy; int ret; @@ -201,43 +241,63 @@ static int emac_sgmii_open(struct emac_adapter *adpt) return 0; } -static int emac_sgmii_close(struct emac_adapter *adpt) +static void emac_sgmii_common_close(struct emac_adapter *adpt) { struct emac_sgmii *sgmii = &adpt->phy; /* Make sure interrupts are disabled */ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); free_irq(sgmii->irq, adpt); - - return 0; } /* The error interrupts are only valid after the link is up */ -static int emac_sgmii_link_up(struct emac_adapter *adpt) +static int emac_sgmii_common_link_change(struct emac_adapter *adpt, bool linkup) { struct emac_sgmii *sgmii = &adpt->phy; int ret; - /* Clear and enable interrupts */ - ret = emac_sgmii_irq_clear(adpt, 0xff); - if (ret) - return ret; + if (linkup) { + /* Clear and enable interrupts */ + ret = emac_sgmii_irq_clear(adpt, 0xff); + if (ret) + return ret; - writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + writel(SGMII_ISR_MASK, + sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + } else { + /* Disable interrupts */ + writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + } return 0; } -static int emac_sgmii_link_down(struct emac_adapter *adpt) -{ - struct emac_sgmii *sgmii = &adpt->phy; +static struct sgmii_ops fsm9900_ops = { + .init = emac_sgmii_init_fsm9900, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; - /* Disable interrupts */ - writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); - synchronize_irq(sgmii->irq); 
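The emac_sgmii_* wrappers added at the top of this file make every sgmii_ops hook optional, which is what lets the patch delete the emac_sgmii_dummy stubs below: a missing hook simply means "nothing to do". The guard pattern in a self-contained sketch:

#include <stddef.h>

struct adapter;

struct ops {
        int  (*init)(struct adapter *a);
        void (*reset)(struct adapter *a);
};

struct adapter {
        const struct ops *ops;  /* NULL, or sparsely filled, on hardware without the block */
};

/* NULL-tolerant wrapper: an absent hook is treated as success. */
static int adapter_init(struct adapter *a)
{
        if (!(a->ops && a->ops->init))
                return 0;

        return a->ops->init(a);
}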
+static struct sgmii_ops qdf2432_ops = { + .init = emac_sgmii_init_qdf2432, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; - return 0; -} +#ifdef CONFIG_ACPI +static struct sgmii_ops qdf2400_ops = { + .init = emac_sgmii_init_qdf2400, + .open = emac_sgmii_common_open, + .close = emac_sgmii_common_close, + .link_change = emac_sgmii_common_link_change, + .reset = emac_sgmii_common_reset, +}; +#endif static int emac_sgmii_acpi_match(struct device *dev, void *data) { @@ -249,7 +309,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) {} }; const struct acpi_device_id *id = acpi_match_device(match_table, dev); - emac_sgmii_function *initialize = data; + struct sgmii_ops **ops = data; if (id) { acpi_handle handle = ACPI_HANDLE(dev); @@ -270,10 +330,10 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) switch (hrv) { case 1: - *initialize = emac_sgmii_init_qdf2432; + *ops = &qdf2432_ops; return 1; case 2: - *initialize = emac_sgmii_init_qdf2400; + *ops = &qdf2400_ops; return 1; } } @@ -285,23 +345,15 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data) static const struct of_device_id emac_sgmii_dt_match[] = { { .compatible = "qcom,fsm9900-emac-sgmii", - .data = emac_sgmii_init_fsm9900, + .data = &fsm9900_ops, }, { .compatible = "qcom,qdf2432-emac-sgmii", - .data = emac_sgmii_init_qdf2432, + .data = &qdf2432_ops, }, {} }; -/* Dummy function for systems without an internal PHY. This avoids having - * to check for NULL pointers before calling the functions. - */ -static int emac_sgmii_dummy(struct emac_adapter *adpt) -{ - return 0; -} - int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) { struct platform_device *sgmii_pdev = NULL; @@ -312,22 +364,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) if (has_acpi_companion(&pdev->dev)) { struct device *dev; - dev = device_find_child(&pdev->dev, &phy->initialize, + dev = device_find_child(&pdev->dev, &phy->sgmii_ops, emac_sgmii_acpi_match); if (!dev) { dev_warn(&pdev->dev, "cannot find internal phy node\n"); - /* There is typically no internal PHY on emulation - * systems, so if we can't find the node, assume - * we are on an emulation system and stub-out - * support for the internal PHY. These systems only - * use ACPI. 
- */ - phy->open = emac_sgmii_dummy; - phy->close = emac_sgmii_dummy; - phy->link_up = emac_sgmii_dummy; - phy->link_down = emac_sgmii_dummy; - - return 0; } @@ -355,14 +396,9 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) goto error_put_device; } - phy->initialize = (emac_sgmii_function)match->data; + phy->sgmii_ops = (struct sgmii_ops *)match->data; } - phy->open = emac_sgmii_open; - phy->close = emac_sgmii_close; - phy->link_up = emac_sgmii_link_up; - phy->link_down = emac_sgmii_link_down; - /* Base address is the first address */ res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); if (!res) { @@ -386,7 +422,7 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) } } - ret = phy->initialize(adpt); + ret = emac_sgmii_init(adpt); if (ret) goto error; diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h index e7c0c3b2baa4..31ba21eb61d2 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h @@ -16,36 +16,44 @@ struct emac_adapter; struct platform_device; -typedef int (*emac_sgmii_function)(struct emac_adapter *adpt); +/** sgmii_ops - internal emac phy operations + * @init initialization function + * @open called when the driver is opened + * @close called when the driver is closed + * @link_change called when the link state changes + * @reset called to reset the internal phy + */ +struct sgmii_ops { + int (*init)(struct emac_adapter *adpt); + int (*open)(struct emac_adapter *adpt); + void (*close)(struct emac_adapter *adpt); + int (*link_change)(struct emac_adapter *adpt, bool link_state); + void (*reset)(struct emac_adapter *adpt); +}; /** emac_sgmii - internal emac phy * @base base address * @digital per-lane digital block * @irq the interrupt number * @decode_error_count reference count of consecutive decode errors - * @initialize initialization function - * @open called when the driver is opened - * @close called when the driver is closed - * @link_up called when the link comes up - * @link_down called when the link comes down + * @sgmii_ops sgmii ops */ struct emac_sgmii { void __iomem *base; void __iomem *digital; unsigned int irq; atomic_t decode_error_count; - emac_sgmii_function initialize; - emac_sgmii_function open; - emac_sgmii_function close; - emac_sgmii_function link_up; - emac_sgmii_function link_down; + struct sgmii_ops *sgmii_ops; }; int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); -void emac_sgmii_reset(struct emac_adapter *adpt); int emac_sgmii_init_fsm9900(struct emac_adapter *adpt); int emac_sgmii_init_qdf2432(struct emac_adapter *adpt); int emac_sgmii_init_qdf2400(struct emac_adapter *adpt); +int emac_sgmii_init(struct emac_adapter *adpt); +int emac_sgmii_open(struct emac_adapter *adpt); +void emac_sgmii_close(struct emac_adapter *adpt); +int emac_sgmii_link_change(struct emac_adapter *adpt, bool link_state); +void emac_sgmii_reset(struct emac_adapter *adpt); #endif diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 13235baf4766..2a0cbc535a2e 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -253,7 +253,7 @@ static int emac_open(struct net_device *netdev) return ret; } - ret = adpt->phy.open(adpt); + ret = emac_sgmii_open(adpt); if (ret) { emac_mac_rx_tx_rings_free_all(adpt); free_irq(irq->irq, irq); @@ -264,7 +264,7 @@ static int emac_open(struct net_device *netdev) if (ret) { 
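Both probe paths above now hand back a whole operations structure through an opaque data pointer — the DT table via of_device_id.data, the ACPI path by hardware revision — and emac_sgmii_config() just casts it back. The idiom reduced to its essentials (names are illustrative):

#include <stddef.h>
#include <string.h>

struct ops { int (*init)(void); };

struct match_entry {
        const char *compatible;
        const void *data;       /* opaque cookie, here a struct ops * */
};

static int fsm9900_init(void) { return 0; }

static const struct ops fsm9900_ops = { .init = fsm9900_init };

static const struct match_entry table[] = {
        { "qcom,fsm9900-emac-sgmii", &fsm9900_ops },
        { NULL, NULL }
};

static const struct ops *lookup(const char *compat)
{
        for (const struct match_entry *m = table; m->compatible; m++)
                if (!strcmp(m->compatible, compat))
                        return m->data; /* converted back from void * */
        return NULL;
}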
emac_mac_rx_tx_rings_free_all(adpt); free_irq(irq->irq, irq); - adpt->phy.close(adpt); + emac_sgmii_close(adpt); return ret; } @@ -278,7 +278,7 @@ static int emac_close(struct net_device *netdev) mutex_lock(&adpt->reset_lock); - adpt->phy.close(adpt); + emac_sgmii_close(adpt); emac_mac_down(adpt); emac_mac_rx_tx_rings_free_all(adpt); @@ -761,11 +761,10 @@ static void emac_shutdown(struct platform_device *pdev) { struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); - struct emac_sgmii *sgmii = &adpt->phy; if (netdev->flags & IFF_UP) { /* Closing the SGMII turns off its interrupts */ - sgmii->close(adpt); + emac_sgmii_close(adpt); /* Resetting the MAC turns off all DMA and its interrupts */ emac_mac_reset(adpt); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 0b5b5da80198..34ac45a774e7 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -54,11 +54,24 @@ struct rmnet_pcpu_stats { struct u64_stats_sync syncp; }; +struct rmnet_priv_stats { + u64 csum_ok; + u64 csum_valid_unset; + u64 csum_validation_failed; + u64 csum_err_bad_buffer; + u64 csum_err_invalid_ip_version; + u64 csum_err_invalid_transport; + u64 csum_fragmented_pkt; + u64 csum_skipped; + u64 csum_sw; +}; + struct rmnet_priv { u8 mux_id; struct net_device *real_dev; struct rmnet_pcpu_stats __percpu *pcpu_stats; struct gro_cells gro_cells; + struct rmnet_priv_stats stats; }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 6fcd586e9804..7fd86d40a337 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -148,7 +148,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) - goto fail; + return -ENOMEM; } if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) @@ -156,17 +156,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) - goto fail; + return -ENOMEM; map_header->mux_id = mux_id; skb->protocol = htons(ETH_P_MAP); return 0; - -fail: - kfree_skb(skb); - return -ENOMEM; } static void @@ -228,15 +224,18 @@ void rmnet_egress_handler(struct sk_buff *skb) mux_id = priv->mux_id; port = rmnet_get_port(skb->dev); - if (!port) { - kfree_skb(skb); - return; - } + if (!port) + goto drop; if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) - return; + goto drop; rmnet_vnd_tx_fixup(skb, orig_dev); dev_queue_xmit(skb); + return; + +drop: + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); + kfree_skb(skb); } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 78fdad0c6f76..3ee8ae9b6838 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -67,28 +67,20 @@ static void rmnet_map_send_ack(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; - int xmit_status; - - if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) { - if (skb->len < sizeof(struct rmnet_map_header) + - RMNET_MAP_GET_LENGTH(skb) + - sizeof(struct 
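The rmnet_map_data.c hunks below all revolve around ones'-complement checksum algebra: the DL trailer carries a sum over the IP payload, from which the header contribution is removed and a pseudo-header added before comparing against the transport checksum field. The basic fold-and-complement primitive those manipulations rely on, as a standalone sketch (RFC 1071 style; big-endian summation chosen for readability, not the kernel's exact helpers):

#include <stdint.h>
#include <stddef.h>

/* Fold the 32-bit accumulator back into 16 bits, then complement. */
static uint16_t csum_fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Ones'-complement sum of a byte range, odd trailing byte padded. */
static uint16_t csum(const uint8_t *data, size_t len)
{
        uint32_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)data[i] << 8 | data[i + 1];
        if (len & 1)
                sum += (uint32_t)data[len - 1] << 8;

        return csum_fold(sum);
}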
rmnet_map_dl_csum_trailer)) { - kfree_skb(skb); - return; - } - - skb_trim(skb, skb->len - - sizeof(struct rmnet_map_dl_csum_trailer)); - } + struct net_device *dev = skb->dev; + + if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) + skb_trim(skb, + skb->len - sizeof(struct rmnet_map_dl_csum_trailer)); skb->protocol = htons(ETH_P_MAP); cmd = RMNET_MAP_GET_CMD_START(skb); cmd->cmd_type = type & 0x03; - netif_tx_lock(skb->dev); - xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); - netif_tx_unlock(skb->dev); + netif_tx_lock(dev); + dev->netdev_ops->ndo_start_xmit(skb, dev); + netif_tx_unlock(dev); } /* Process MAP command frame and send N/ACK message as appropriate. Message cmd diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index a6ea09416f8d..57a9c314a665 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -48,7 +48,8 @@ static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, static int rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, - struct rmnet_map_dl_csum_trailer *csum_trailer) + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) { __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum; u16 csum_value, csum_value_final; @@ -58,19 +59,25 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, ip4h = (struct iphdr *)(skb->data); if ((ntohs(ip4h->frag_off) & IP_MF) || - ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) + ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) { + priv->stats.csum_fragmented_pkt++; return -EOPNOTSUPP; + } txporthdr = skb->data + ip4h->ihl * 4; csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr); - if (!csum_field) + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; return -EPROTONOSUPPORT; + } /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */ - if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) + if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) { + priv->stats.csum_skipped++; return 0; + } csum_value = ~ntohs(csum_trailer->csum_value); hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl); @@ -102,16 +109,20 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, } } - if (csum_value_final == ntohs((__force __be16)*csum_field)) + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; return 0; - else + } else { + priv->stats.csum_validation_failed++; return -EINVAL; + } } #if IS_ENABLED(CONFIG_IPV6) static int rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, - struct rmnet_map_dl_csum_trailer *csum_trailer) + struct rmnet_map_dl_csum_trailer *csum_trailer, + struct rmnet_priv *priv) { __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp; u16 csum_value, csum_value_final; @@ -125,8 +136,10 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, txporthdr = skb->data + sizeof(struct ipv6hdr); csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr); - if (!csum_field) + if (!csum_field) { + priv->stats.csum_err_invalid_transport++; return -EPROTONOSUPPORT; + } csum_value = ~ntohs(csum_trailer->csum_value); ip6_hdr_csum = (__force __be16) @@ -164,10 +177,13 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, } } - if (csum_value_final == ntohs((__force __be16)*csum_field)) + if (csum_value_final == ntohs((__force __be16)*csum_field)) { + priv->stats.csum_ok++; return 0; - else + } else { + priv->stats.csum_validation_failed++; return -EINVAL; + } } #endif @@ 
-339,24 +355,34 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, */ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) { + struct rmnet_priv *priv = netdev_priv(skb->dev); struct rmnet_map_dl_csum_trailer *csum_trailer; - if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { + priv->stats.csum_sw++; return -EOPNOTSUPP; + } csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); - if (!csum_trailer->valid) + if (!csum_trailer->valid) { + priv->stats.csum_valid_unset++; return -EINVAL; + } - if (skb->protocol == htons(ETH_P_IP)) - return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer); - else if (skb->protocol == htons(ETH_P_IPV6)) + if (skb->protocol == htons(ETH_P_IP)) { + return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv); + } else if (skb->protocol == htons(ETH_P_IPV6)) { #if IS_ENABLED(CONFIG_IPV6) - return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer); + return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv); #else + priv->stats.csum_err_invalid_ip_version++; return -EPROTONOSUPPORT; #endif + } else { + priv->stats.csum_err_invalid_ip_version++; + return -EPROTONOSUPPORT; + } return 0; } @@ -367,6 +393,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, struct net_device *orig_dev) { + struct rmnet_priv *priv = netdev_priv(orig_dev); struct rmnet_map_ul_csum_header *ul_header; void *iphdr; @@ -389,8 +416,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); return; #else + priv->stats.csum_err_invalid_ip_version++; goto sw_csum; #endif + } else { + priv->stats.csum_err_invalid_ip_version++; } } @@ -399,4 +429,6 @@ sw_csum: ul_header->csum_insert_offset = 0; ul_header->csum_enabled = 0; ul_header->udp_ip4_ind = 0; + + priv->stats.csum_sw++; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 2ea16a088de8..b9a7548ec6a0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -152,6 +152,56 @@ static const struct net_device_ops rmnet_vnd_ops = { .ndo_get_stats64 = rmnet_get_stats64, }; +static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { + "Checksum ok", + "Checksum valid bit not set", + "Checksum validation failed", + "Checksum error bad buffer", + "Checksum error bad ip version", + "Checksum error bad transport", + "Checksum skipped on ip fragment", + "Checksum skipped", + "Checksum computed in software", +}; + +static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(buf, &rmnet_gstrings_stats, + sizeof(rmnet_gstrings_stats)); + break; + } +} + +static int rmnet_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rmnet_gstrings_stats); + default: + return -EOPNOTSUPP; + } +} + +static void rmnet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_priv_stats *st = &priv->stats; + + if (!data) + return; + + memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64)); +} + +static const struct ethtool_ops rmnet_ethtool_ops = { + .get_ethtool_stats = rmnet_get_ethtool_stats, + .get_strings = rmnet_get_strings, + .get_sset_count = rmnet_get_sset_count, 
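rmnet_get_ethtool_stats() above copies the whole rmnet_priv_stats struct straight into the u64 array that backs the string table, so the two must stay in lockstep: same count, same order, u64-only members. A compile-time guard in that spirit (a sketch of an extra safety net, not something the patch itself adds):

#include <assert.h>
#include <stdint.h>

#define ETH_GSTRING_LEN 32
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct priv_stats {
        uint64_t csum_ok;
        uint64_t csum_valid_unset;
        /* ... one u64 per string, in the same order as the table ... */
};

static const char gstrings[][ETH_GSTRING_LEN] = {
        "Checksum ok",
        "Checksum valid bit not set",
};

/* Fails the build if a counter or string is added without its twin. */
static_assert(sizeof(struct priv_stats) ==
              ARRAY_SIZE(gstrings) * sizeof(uint64_t),
              "string table and stats struct out of sync");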
+}; + /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU, * flags, ARP type, needed headroom, etc... */ @@ -170,6 +220,11 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); rmnet_dev->needs_free_netdev = true; + rmnet_dev->ethtool_ops = &rmnet_ethtool_ops; + + /* This perm addr will be used as interface identifier by IPv6 */ + rmnet_dev->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(rmnet_dev->perm_addr); } /* Exposed API */ diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index d118da5a10a2..ffd68a7bc9e1 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1104,7 +1104,6 @@ static int rtl8139_init_one(struct pci_dev *pdev, return 0; err_out: - netif_napi_del(&tp->napi); __rtl8139_cleanup_dev (dev); pci_disable_device (pdev); return i; @@ -1119,7 +1118,6 @@ static void rtl8139_remove_one(struct pci_dev *pdev) assert (dev != NULL); cancel_delayed_work_sync(&tp->thread); - netif_napi_del(&tp->napi); unregister_netdev (dev); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index c7aac1fc99e8..75dfac0248f4 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -84,12 +84,11 @@ The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; -#define MAX_READ_REQUEST_SHIFT 12 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ #define R8169_REGS_SIZE 256 -#define R8169_NAPI_WEIGHT 64 +#define R8169_RX_BUF_SIZE (SZ_16K - 1) #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) @@ -172,12 +171,11 @@ enum rtl_tx_desc_version { #define JUMBO_7K (7*1024 - ETH_HLEN - 2) #define JUMBO_9K (9*1024 - ETH_HLEN - 2) -#define _R(NAME,TD,FW,SZ,B) { \ +#define _R(NAME,TD,FW,SZ) { \ .name = NAME, \ .txd_version = TD, \ .fw_name = FW, \ .jumbo_max = SZ, \ - .jumbo_tx_csum = B \ } static const struct { @@ -185,135 +183,111 @@ static const struct { enum rtl_tx_desc_version txd_version; const char *fw_name; u16 jumbo_max; - bool jumbo_tx_csum; } rtl_chip_infos[] = { /* PCI devices. */ [RTL_GIGA_MAC_VER_01] = - _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_02] = - _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_03] = - _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_04] = - _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_05] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), [RTL_GIGA_MAC_VER_06] = - _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K), /* PCI-E devices. 
*/ [RTL_GIGA_MAC_VER_07] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_08] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_09] = - _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_10] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_11] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_12] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_13] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_14] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_15] = - _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_16] = - _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K), [RTL_GIGA_MAC_VER_18] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_19] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_20] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_21] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_22] = - _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_23] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_24] = - _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K), [RTL_GIGA_MAC_VER_25] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, - JUMBO_9K, false), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, JUMBO_9K), [RTL_GIGA_MAC_VER_26] = - _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, - JUMBO_9K, false), + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, JUMBO_9K), [RTL_GIGA_MAC_VER_27] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_28] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_29] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, - JUMBO_1K, true), + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_30] = - _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, - JUMBO_1K, true), + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_31] = - _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_32] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, - JUMBO_9K, false), + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, JUMBO_9K), [RTL_GIGA_MAC_VER_33] = - _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, - JUMBO_9K, false), + _R("RTL8168e/8111e", 
RTL_TD_1, FIRMWARE_8168E_2, JUMBO_9K), [RTL_GIGA_MAC_VER_34] = - _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, - JUMBO_9K, false), + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, JUMBO_9K), [RTL_GIGA_MAC_VER_35] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, - JUMBO_9K, false), + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, JUMBO_9K), [RTL_GIGA_MAC_VER_36] = - _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, - JUMBO_9K, false), + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, JUMBO_9K), [RTL_GIGA_MAC_VER_37] = - _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, - JUMBO_1K, true), + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, JUMBO_1K), [RTL_GIGA_MAC_VER_38] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, - JUMBO_9K, false), + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, JUMBO_9K), [RTL_GIGA_MAC_VER_39] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, - JUMBO_1K, true), + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_40] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, - JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, JUMBO_9K), [RTL_GIGA_MAC_VER_41] = - _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_42] = - _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, - JUMBO_9K, false), + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, JUMBO_9K), [RTL_GIGA_MAC_VER_43] = - _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, - JUMBO_1K, true), + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_44] = - _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, - JUMBO_9K, false), + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, JUMBO_9K), [RTL_GIGA_MAC_VER_45] = - _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, - JUMBO_9K, false), + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, JUMBO_9K), [RTL_GIGA_MAC_VER_46] = - _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, - JUMBO_9K, false), + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, JUMBO_9K), [RTL_GIGA_MAC_VER_47] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, - JUMBO_1K, false), + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, JUMBO_1K), [RTL_GIGA_MAC_VER_48] = - _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, - JUMBO_1K, false), + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, JUMBO_1K), [RTL_GIGA_MAC_VER_49] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_50] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), [RTL_GIGA_MAC_VER_51] = - _R("RTL8168ep/8111ep", RTL_TD_1, NULL, - JUMBO_9K, false), + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, JUMBO_9K), }; #undef _R @@ -345,7 +319,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -static int rx_buf_sz = 16383; static int use_dac = -1; static struct { u32 msg_enable; @@ -437,13 +410,8 @@ enum rtl8168_8101_registers { CSIAR = 0x68, #define CSIAR_FLAG 0x80000000 #define CSIAR_WRITE_CMD 0x80000000 -#define CSIAR_BYTE_ENABLE 0x0f -#define CSIAR_BYTE_ENABLE_SHIFT 12 -#define CSIAR_ADDR_MASK 0x0fff -#define CSIAR_FUNC_CARD 0x00000000 -#define CSIAR_FUNC_SDIO 0x00010000 -#define CSIAR_FUNC_NIC 0x00020000 -#define CSIAR_FUNC_NIC2 0x00010000 +#define CSIAR_BYTE_ENABLE 0x0000f000 +#define CSIAR_ADDR_MASK 0x00000fff PMCH = 0x6f, EPHYAR = 0x80, #define EPHYAR_FLAG 0x80000000 @@ -626,6 +594,7 @@ enum rtl_register_content { RxChkSum = (1 << 5), PCIDAC = (1 << 4), PCIMulRW = (1 << 3), +#define INTT_MASK GENMASK(1, 0) 
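INTT_MASK above replaces the bare "& 3" used on CPlusCmd: GENMASK(1, 0) expands to the two-bit mask 0x3 covering the INTT_0..INTT_3 encodings, and the coalescing hunks further down use it both to extract the current scale index and to splice a new one in. In miniature (a 32-bit userspace rendition of the macro):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define INTT_MASK GENMASK(1, 0)         /* == 0x3 */

int main(void)
{
        uint16_t cp_cmd = 0x20e1;       /* arbitrary register snapshot */
        unsigned int intt = cp_cmd & INTT_MASK;         /* current scale index */

        cp_cmd = (cp_cmd & ~INTT_MASK) | 2;             /* select INTT_2 */
        printf("old index %u, new cp_cmd 0x%04x\n", intt, cp_cmd);
        return 0;
}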
INTT_0 = 0x0000, // 8168 INTT_1 = 0x0001, // 8168 INTT_2 = 0x0002, // 8168 @@ -716,6 +685,7 @@ enum rtl_rx_desc_bit { }; #define RsvdMask 0x3fffc000 +#define CPCMD_QUIRK_MASK (Normal_mode | RxVlan | RxChkSum | INTT_MASK) struct TxDesc { __le32 opts1; @@ -778,7 +748,6 @@ struct rtl8169_private { struct net_device *dev; struct napi_struct napi; u32 msg_enable; - u16 txd_version; u16 mac_version; u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ @@ -802,26 +771,16 @@ struct rtl8169_private { int (*read)(struct rtl8169_private *, int); } mdio_ops; - struct pll_power_ops { - void (*down)(struct rtl8169_private *); - void (*up)(struct rtl8169_private *); - } pll_power_ops; - struct jumbo_ops { void (*enable)(struct rtl8169_private *); void (*disable)(struct rtl8169_private *); } jumbo_ops; - struct csi_ops { - void (*write)(struct rtl8169_private *, int, int); - u32 (*read)(struct rtl8169_private *, int); - } csi_ops; - int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *); void (*phy_reset_enable)(struct rtl8169_private *tp); - void (*hw_start)(struct net_device *); + void (*hw_start)(struct rtl8169_private *tp); unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); unsigned int (*link_ok)(struct rtl8169_private *tp); int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); @@ -833,14 +792,11 @@ struct rtl8169_private { struct work_struct work; } wk; - unsigned features; - struct mii_if_info mii; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; - u32 opts1_mask; struct rtl_fw { const struct firmware *fw; @@ -1645,23 +1601,8 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) if (options & LinkUp) wolopts |= WAKE_PHY; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) wolopts |= WAKE_MAGIC; break; @@ -1722,23 +1663,8 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) RTL_W8(tp, Cfg9346, Cfg9346_Unlock); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... 
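The collapsed ladders in these hunks lean on the "case low ... high:" range syntax — a GCC/Clang extension used throughout the kernel that matches any value in the inclusive range, so a dozen consecutive version labels shrink to one line. For example (sketch):

/* Equivalent first-match dispatch, using the range-case extension. */
static const char *wol_regs(int ver)
{
        switch (ver) {
        case 34 ... 38:         /* inclusive on both ends */
        case 40 ... 51:
                return "v2 magic-packet registers";
        default:
                return "legacy registers";
        }
}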
RTL_GIGA_MAC_VER_51: tmp = ARRAY_SIZE(cfg) - 1; if (wolopts & WAKE_MAGIC) rtl_w0w1_eri(tp, @@ -1960,18 +1886,20 @@ static netdev_features_t rtl8169_fix_features(struct net_device *dev, features &= ~NETIF_F_ALL_TSO; if (dev->mtu > JUMBO_1K && - !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + tp->mac_version > RTL_GIGA_MAC_VER_06) features &= ~NETIF_F_IP_CSUM; return features; } -static void __rtl8169_set_features(struct net_device *dev, - netdev_features_t features) +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) { struct rtl8169_private *tp = netdev_priv(dev); u32 rx_config; + rtl_lock_work(tp); + rx_config = RTL_R32(tp, RxConfig); if (features & NETIF_F_RXALL) rx_config |= (AcceptErr | AcceptRunt); @@ -1990,28 +1918,14 @@ static void __rtl8169_set_features(struct net_device *dev, else tp->cp_cmd &= ~RxVlan; - tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum); - RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); -} - -static int rtl8169_set_features(struct net_device *dev, - netdev_features_t features) -{ - struct rtl8169_private *tp = netdev_priv(dev); - - features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; - rtl_lock_work(tp); - if (features ^ dev->features) - __rtl8169_set_features(dev, features); rtl_unlock_work(tp); return 0; } - static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) { return (skb_vlan_tag_present(skb)) ? @@ -2155,9 +2069,8 @@ DECLARE_RTL_COND(rtl_counters_cond) return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump); } -static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) +static bool rtl8169_do_counters(struct rtl8169_private *tp, u32 counter_cmd) { - struct rtl8169_private *tp = netdev_priv(dev); dma_addr_t paddr = tp->counters_phys_addr; u32 cmd; @@ -2170,10 +2083,8 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); } -static bool rtl8169_reset_counters(struct net_device *dev) +static bool rtl8169_reset_counters(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - /* * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the * tally counters. @@ -2181,13 +2092,11 @@ static bool rtl8169_reset_counters(struct net_device *dev) if (tp->mac_version < RTL_GIGA_MAC_VER_19) return true; - return rtl8169_do_counters(dev, CounterReset); + return rtl8169_do_counters(tp, CounterReset); } -static bool rtl8169_update_counters(struct net_device *dev) +static bool rtl8169_update_counters(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - /* * Some chips are unable to dump tally counters when the receiver * is disabled. @@ -2195,12 +2104,11 @@ static bool rtl8169_update_counters(struct net_device *dev) if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0) return true; - return rtl8169_do_counters(dev, CounterDump); + return rtl8169_do_counters(tp, CounterDump); } -static bool rtl8169_init_counter_offsets(struct net_device *dev) +static bool rtl8169_init_counter_offsets(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); struct rtl8169_counters *counters = tp->counters; bool ret = false; @@ -2223,10 +2131,10 @@ static bool rtl8169_init_counter_offsets(struct net_device *dev) return true; /* If both, reset and update fail, propagate to caller. 
*/ - if (rtl8169_reset_counters(dev)) + if (rtl8169_reset_counters(tp)) ret = true; - if (rtl8169_update_counters(dev)) + if (rtl8169_update_counters(tp)) ret = true; tp->tc_offset.tx_errors = counters->tx_errors; @@ -2249,7 +2157,7 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, pm_runtime_get_noresume(d); if (pm_runtime_active(d)) - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); pm_runtime_put_noidle(d); @@ -2391,7 +2299,7 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) if (IS_ERR(ci)) return PTR_ERR(ci); - scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3]; + scale = &ci->scalev[tp->cp_cmd & INTT_MASK]; /* read IntrMitigate and adjust according to scale */ for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { @@ -2490,7 +2398,7 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) RTL_W16(tp, IntrMitigate, swab16(w)); - tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; + tp->cp_cmd = (tp->cp_cmd & ~INTT_MASK) | cp01; RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_R16(tp, CPlusCmd); @@ -2520,7 +2428,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, - struct net_device *dev, u8 default_version) + u8 default_version) { /* * The driver currently handles the 8168Bf and the 8168Be identically @@ -2560,12 +2468,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, /* 8168E family. */ { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, - { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, /* 8168D family. */ - { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, @@ -2575,32 +2481,24 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, /* 8168C family. */ - { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, - { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, /* 8168B family. */ { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, - { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, /* 8101 family. 
*/ - { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, - { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, - { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, @@ -2635,8 +2533,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, tp->mac_version = p->mac_version; if (tp->mac_version == RTL_GIGA_MAC_NONE) { - netif_notice(tp, probe, dev, - "unknown MAC, using family default\n"); + dev_notice(tp_to_dev(tp), + "unknown MAC, using family default\n"); tp->mac_version = default_version; } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { tp->mac_version = tp->mii.supports_gmii ? @@ -4685,18 +4583,7 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) ops->write = r8168dp_2_mdio_write; ops->read = r8168dp_2_mdio_read; break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: ops->write = r8168g_mdio_write; ops->read = r8168g_mdio_read; break; @@ -4741,21 +4628,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_37 ... 
RTL_GIGA_MAC_VER_51: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -4775,79 +4648,13 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) return true; } -static void r810x_phy_power_down(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); -} - -static void r810x_phy_power_up(struct rtl8169_private *tp) -{ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); -} - -static void r810x_pll_power_down(struct rtl8169_private *tp) -{ - if (rtl_wol_pll_power_down(tp)) - return; - - r810x_phy_power_down(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); - break; - } -} - -static void r810x_pll_power_up(struct rtl8169_private *tp) -{ - r810x_phy_power_up(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_16: - break; - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); - break; - default: - RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); - break; - } -} - static void r8168_phy_power_up(struct rtl8169_private *tp) { rtl_writephy(tp, 0x1f, 0x0000); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_17 ... RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_writephy(tp, 0x0e, 0x0000); break; @@ -4855,6 +4662,9 @@ static void r8168_phy_power_up(struct rtl8169_private *tp) break; } rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); + + /* give MAC/PHY some time to resume */ + msleep(20); } static void r8168_phy_power_down(struct rtl8169_private *tp) @@ -4870,18 +4680,7 @@ static void r8168_phy_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_11: case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_17 ... 
RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_writephy(tp, 0x0e, 0x0200); default: @@ -4895,12 +4694,6 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) if (r8168_check_dash(tp)) return; - if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || - tp->mac_version == RTL_GIGA_MAC_VER_24) && - (RTL_R16(tp, CPlusCmd) & ASF)) { - return; - } - if (tp->mac_version == RTL_GIGA_MAC_VER_32 || tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(tp, 0x19, 0xff64); @@ -4911,16 +4704,15 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) r8168_phy_power_down(tp); switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); @@ -4938,18 +4730,17 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) static void r8168_pll_power_up(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80); break; case RTL_GIGA_MAC_VER_44: case RTL_GIGA_MAC_VER_45: case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: case RTL_GIGA_MAC_VER_50: case RTL_GIGA_MAC_VER_51: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); @@ -4966,130 +4757,41 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) r8168_phy_power_up(tp); } -static void rtl_generic_op(struct rtl8169_private *tp, - void (*op)(struct rtl8169_private *)) -{ - if (op) - op(tp); -} - static void rtl_pll_power_down(struct rtl8169_private *tp) { - rtl_generic_op(tp, tp->pll_power_ops.down); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... RTL_GIGA_MAC_VER_15: + break; + default: + r8168_pll_power_down(tp); + } } static void rtl_pll_power_up(struct rtl8169_private *tp) { - rtl_generic_op(tp, tp->pll_power_ops.up); - - /* give MAC/PHY some time to resume */ - msleep(20); -} - -static void rtl_init_pll_power_ops(struct rtl8169_private *tp) -{ - struct pll_power_ops *ops = &tp->pll_power_ops; - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_07: - case RTL_GIGA_MAC_VER_08: - case RTL_GIGA_MAC_VER_09: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_29: - case RTL_GIGA_MAC_VER_30: - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - ops->down = r810x_pll_power_down; - ops->up = r810x_pll_power_up; + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_13 ... 
RTL_GIGA_MAC_VER_15: break; - - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_17: - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: - case RTL_GIGA_MAC_VER_25: - case RTL_GIGA_MAC_VER_26: - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - case RTL_GIGA_MAC_VER_32: - case RTL_GIGA_MAC_VER_33: - case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_35: - case RTL_GIGA_MAC_VER_36: - case RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - ops->down = r8168_pll_power_down; - ops->up = r8168_pll_power_up; - break; - default: - ops->down = NULL; - ops->up = NULL; - break; + r8168_pll_power_up(tp); } } static void rtl_init_rxcfg(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - case RTL_GIGA_MAC_VER_04: - case RTL_GIGA_MAC_VER_05: - case RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_14: - case RTL_GIGA_MAC_VER_15: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17: RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_18: - case RTL_GIGA_MAC_VER_19: - case RTL_GIGA_MAC_VER_20: - case RTL_GIGA_MAC_VER_21: - case RTL_GIGA_MAC_VER_22: - case RTL_GIGA_MAC_VER_23: - case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_35: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); break; - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; default: @@ -5105,16 +4807,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - rtl_generic_op(tp, tp->jumbo_ops.enable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + if (tp->jumbo_ops.enable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.enable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - rtl_generic_op(tp, tp->jumbo_ops.disable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + if (tp->jumbo_ops.disable) { + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + tp->jumbo_ops.disable(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + } } static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5128,7 +4834,7 @@ static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5154,7 +4860,7 @@ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, 0x0c); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5166,7 +4872,7 @@ static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) { rtl_tx_performance_tweak(tp, - (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); + PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); } static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) @@ -5226,18 +4932,7 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp) * No action needed for jumbo frames with 8169. * No jumbo for 810x at all. */ - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: default: ops->disable = NULL; ops->enable = NULL; @@ -5323,32 +5018,21 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) rtl_rx_close(tp); - if (tp->mac_version == RTL_GIGA_MAC_VER_27 || - tp->mac_version == RTL_GIGA_MAC_VER_28 || - tp->mac_version == RTL_GIGA_MAC_VER_31) { + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); - } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_38 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_45 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_47 || - tp->mac_version == RTL_GIGA_MAC_VER_48 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) { + break; + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); - } else { + break; + default: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); udelay(100); + break; } rtl_hw_reset(tp); @@ -5361,13 +5045,10 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) (InterFrameGap << TxInterFrameGapShift)); } -static void rtl_hw_start(struct net_device *dev) +static void rtl_set_rx_max_size(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - tp->hw_start(dev); - - rtl_irq_enable_all(tp); + /* Low hurts. Let's disable the filtering. */ + RTL_W16(tp, RxMaxSize, R8169_RX_BUF_SIZE + 1); } static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) @@ -5383,21 +5064,6 @@ static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp) RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); } -static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp) -{ - u16 cmd; - - cmd = RTL_R16(tp, CPlusCmd); - RTL_W16(tp, CPlusCmd, cmd); - return cmd; -} - -static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz) -{ - /* Low hurts. Let's disable the filtering. 
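
Most of the cleanup in this file collapses long runs of case labels into the GCC/Clang case-range extension (case A ... B:), which the kernel permits. A toy example of the same construct, with stand-in enum values rather than the real RTL_GIGA_MAC_VER_xx constants:

#include <stdio.h>

enum mac_ver { VER_01, VER_06 = 5, VER_13 = 12, VER_15 = 14, VER_40 = 39 };

static const char *pll_class(enum mac_ver v)
{
        switch (v) {
        case VER_01 ... VER_06:         /* GNU case-range extension */
        case VER_13 ... VER_15:
                return "no PLL power ops";      /* old 8169/8101 parts */
        default:
                return "r8168 PLL power ops";
        }
}

int main(void)
{
        printf("%s\n", pll_class(VER_13));
        printf("%s\n", pll_class(VER_40));
        return 0;
}
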
*/ - RTL_W16(tp, RxMaxSize, rx_buf_sz + 1); -} - static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version) { static const struct rtl_cfg2_info { @@ -5475,36 +5141,34 @@ static void rtl_set_rx_mode(struct net_device *dev) RTL_W32(tp, RxConfig, tmp); } -static void rtl_hw_start_8169(struct net_device *dev) +static void rtl_hw_start(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - - if (tp->mac_version == RTL_GIGA_MAC_VER_05) { - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW); - pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); - } - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - if (tp->mac_version == RTL_GIGA_MAC_VER_01 || - tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03 || - tp->mac_version == RTL_GIGA_MAC_VER_04) - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_init_rxcfg(tp); + tp->hw_start(tp); - RTL_W8(tp, EarlyTxThres, NoEarlyTx); + rtl_set_rx_max_size(tp); + rtl_set_rx_tx_desc_registers(tp); + rtl_set_rx_tx_config_registers(tp); + RTL_W8(tp, Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(tp, IntrMask); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_mode(tp->dev); + /* no early-rx interrupts */ + RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); + rtl_irq_enable_all(tp); +} - rtl_set_rx_max_size(tp, rx_buf_sz); +static void rtl_hw_start_8169(struct rtl8169_private *tp) +{ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - if (tp->mac_version == RTL_GIGA_MAC_VER_01 || - tp->mac_version == RTL_GIGA_MAC_VER_02 || - tp->mac_version == RTL_GIGA_MAC_VER_03 || - tp->mac_version == RTL_GIGA_MAC_VER_04) - rtl_set_rx_tx_config_registers(tp); + RTL_W8(tp, EarlyTxThres, NoEarlyTx); - tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW; + tp->cp_cmd |= PCIMulRW; if (tp->mac_version == RTL_GIGA_MAC_VER_02 || tp->mac_version == RTL_GIGA_MAC_VER_03) { @@ -5523,56 +5187,7 @@ static void rtl_hw_start_8169(struct net_device *dev) */ RTL_W16(tp, IntrMitigate, 0x0000); - rtl_set_rx_tx_desc_registers(tp); - - if (tp->mac_version != RTL_GIGA_MAC_VER_01 && - tp->mac_version != RTL_GIGA_MAC_VER_02 && - tp->mac_version != RTL_GIGA_MAC_VER_03 && - tp->mac_version != RTL_GIGA_MAC_VER_04) { - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - rtl_set_rx_tx_config_registers(tp); - } - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ - RTL_R8(tp, IntrMask); - RTL_W32(tp, RxMissed, 0); - - rtl_set_rx_mode(dev); - - /* no early-rx interrupts */ - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); -} - -static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) -{ - if (tp->csi_ops.write) - tp->csi_ops.write(tp, addr, value); -} - -static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) -{ - return tp->csi_ops.read ? 
tp->csi_ops.read(tp, addr) : ~0; -} - -static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) -{ - u32 csi; - - csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; - rtl_csi_write(tp, 0x070c, csi | bits); -} - -static void rtl_csi_access_enable_1(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x17000000); -} - -static void rtl_csi_access_enable_2(struct rtl8169_private *tp) -{ - rtl_csi_access_enable(tp, 0x27000000); } DECLARE_RTL_COND(rtl_csiar_cond) @@ -5580,101 +5195,55 @@ DECLARE_RTL_COND(rtl_csiar_cond) return RTL_R32(tp, CSIAR) & CSIAR_FLAG; } -static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) -{ - RTL_W32(tp, CSIDR, value); - RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); -} - -static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? - RTL_R32(tp, CSIDR) : ~0; -} + u32 func = PCI_FUNC(tp->pci_dev->devfn); -static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) -{ RTL_W32(tp, CSIDR, value); RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | - CSIAR_FUNC_NIC); + CSIAR_BYTE_ENABLE | func << 16); rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); } -static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + u32 func = PCI_FUNC(tp->pci_dev->devfn); + + RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func << 16 | + CSIAR_BYTE_ENABLE); return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? RTL_R32(tp, CSIDR) : ~0; } -static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) { - RTL_W32(tp, CSIDR, value); - RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | - CSIAR_FUNC_NIC2); + struct pci_dev *pdev = tp->pci_dev; + u32 csi; - rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); + /* According to Realtek the value at config space address 0x070f + * controls the L0s/L1 entrance latency. We try standard ECAM access + * first and if it fails fall back to CSI. + */ + if (pdev->cfg_size > 0x070f && + pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) + return; + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | val << 24); } -static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) { - RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | - CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); - - return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
- RTL_R32(tp, CSIDR) : ~0; + rtl_csi_access_enable(tp, 0x17); } -static void rtl_init_csi_ops(struct rtl8169_private *tp) +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) { - struct csi_ops *ops = &tp->csi_ops; - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_01: - case RTL_GIGA_MAC_VER_02: - case RTL_GIGA_MAC_VER_03: - case RTL_GIGA_MAC_VER_04: - case RTL_GIGA_MAC_VER_05: - case RTL_GIGA_MAC_VER_06: - case RTL_GIGA_MAC_VER_10: - case RTL_GIGA_MAC_VER_11: - case RTL_GIGA_MAC_VER_12: - case RTL_GIGA_MAC_VER_13: - case RTL_GIGA_MAC_VER_14: - case RTL_GIGA_MAC_VER_15: - case RTL_GIGA_MAC_VER_16: - case RTL_GIGA_MAC_VER_17: - ops->write = NULL; - ops->read = NULL; - break; - - case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_38: - ops->write = r8402_csi_write; - ops->read = r8402_csi_read; - break; - - case RTL_GIGA_MAC_VER_44: - ops->write = r8411_csi_write; - ops->read = r8411_csi_read; - break; - - default: - ops->write = r8169_csi_write; - ops->read = r8169_csi_read; - break; - } + rtl_csi_access_enable(tp, 0x27); } struct ephy_info { @@ -5721,25 +5290,15 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) RTL_W8(tp, Config3, data); } -#define R8168_CPCMD_QUIRK_MASK (\ - EnableBist | \ - Mac_dbgo_oe | \ - Force_half_dup | \ - Force_rxflow_en | \ - Force_txflow_en | \ - Cxpl_dbg_sel | \ - ASF | \ - PktCntrDisable | \ - Mac_dbgo_sel) - static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(tp, (0x5 << MAX_READ_REQUEST_SHIFT) | + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | PCI_EXP_DEVCTL_NOSNOOP_EN); } } @@ -5760,11 +5319,12 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_disable_clock_request(tp); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) @@ -5791,9 +5351,10 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) @@ -5808,9 +5369,10 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) @@ -5865,9 +5427,10 @@ static void rtl_hw_start_8168d(struct rtl8169_private *tp) RTL_W8(tp, MaxTxPacketSize, TxPacketMax); if (tp->dev->mtu <= 
ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + tp->cp_cmd &= CPCMD_QUIRK_MASK; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); } static void rtl_hw_start_8168dp(struct rtl8169_private *tp) @@ -5875,7 +5438,7 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5892,7 +5455,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5924,7 +5487,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, MaxTxPacketSize, TxPacketMax); @@ -5949,7 +5512,7 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@ -5979,7 +5542,7 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) { rtl_csi_access_enable_2(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); @@ -6050,7 +5613,7 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6150,7 +5713,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6232,7 +5795,7 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_csi_access_enable_1(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); @@ -6328,18 +5891,12 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) r8168_mac_ocp_write(tp, 0xe860, data); } -static void rtl_hw_start_8168(struct net_device *dev) +static void rtl_hw_start_8168(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, MaxTxPacketSize, 
TxPacketMax); - rtl_set_rx_max_size(tp, rx_buf_sz); - - tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1; - + tp->cp_cmd &= ~INTT_MASK; + tp->cp_cmd |= PktCntrDisable | INTT_1; RTL_W16(tp, CPlusCmd, tp->cp_cmd); RTL_W16(tp, IntrMitigate, 0x5151); @@ -6350,12 +5907,6 @@ static void rtl_hw_start_8168(struct net_device *dev) tp->event_slow &= ~RxOverflow; } - rtl_set_rx_tx_desc_registers(tp); - - rtl_set_rx_tx_config_registers(tp); - - RTL_R8(tp, IntrMask); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_11: rtl_hw_start_8168bb(tp); @@ -6456,30 +6007,11 @@ static void rtl_hw_start_8168(struct net_device *dev) default: printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", - dev->name, tp->mac_version); + tp->dev->name, tp->mac_version); break; } - - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); - - rtl_set_rx_mode(dev); - - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } -#define R810X_CPCMD_QUIRK_MASK (\ - EnableBist | \ - Mac_dbgo_oe | \ - Force_half_dup | \ - Force_rxflow_en | \ - Force_txflow_en | \ - Cxpl_dbg_sel | \ - ASF | \ - PktCntrDisable | \ - Mac_dbgo_sel) - static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8102e_1[] = { @@ -6498,7 +6030,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) RTL_W8(tp, DBG_REG, FIX_NAK_1); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, Config1, LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); @@ -6515,7 +6047,7 @@ static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) { rtl_csi_access_enable_2(tp); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); @@ -6578,7 +6110,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); - rtl_tx_performance_tweak(tp, 0x5 << MAX_READ_REQUEST_SHIFT); + rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); @@ -6603,32 +6135,21 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) rtl_pcie_state_l2l3_enable(tp, false); } -static void rtl_hw_start_8101(struct net_device *dev) +static void rtl_hw_start_8101(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - struct pci_dev *pdev = tp->pci_dev; - if (tp->mac_version >= RTL_GIGA_MAC_VER_30) tp->event_slow &= ~RxFIFOOver; if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) - pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + pcie_capability_set_word(tp->pci_dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_NOSNOOP_EN); - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); - RTL_W8(tp, MaxTxPacketSize, TxPacketMax); - rtl_set_rx_max_size(tp, rx_buf_sz); - - tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + tp->cp_cmd &= CPCMD_QUIRK_MASK; RTL_W16(tp, CPlusCmd, tp->cp_cmd); - rtl_set_rx_tx_desc_registers(tp); - - rtl_set_rx_tx_config_registers(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_07: rtl_hw_start_8102e_1(tp); @@ -6665,17 +6186,7 @@ static void rtl_hw_start_8101(struct net_device *dev) break; } - RTL_W8(tp, Cfg9346, Cfg9346_Lock); - RTL_W16(tp, IntrMitigate, 0x0000); - - RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); 
- - rtl_set_rx_mode(dev); - - RTL_R8(tp, IntrMask); - - RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); } static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) @@ -6702,29 +6213,22 @@ static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, void **data_buff, struct RxDesc *desc) { - dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), rx_buf_sz, - DMA_FROM_DEVICE); + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), + R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); kfree(*data_buff); *data_buff = NULL; rtl8169_make_unusable_by_asic(desc); } -static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +static inline void rtl8169_mark_to_asic(struct RxDesc *desc) { u32 eor = le32_to_cpu(desc->opts1) & RingEnd; /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); -} - -static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, - u32 rx_buf_sz) -{ - desc->addr = cpu_to_le64(mapping); - rtl8169_mark_to_asic(desc, rx_buf_sz); + desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); } static inline void *rtl8169_align(void *data) @@ -6738,21 +6242,20 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, void *data; dma_addr_t mapping; struct device *d = tp_to_dev(tp); - struct net_device *dev = tp->dev; - int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; + int node = dev_to_node(d); - data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + data = kmalloc_node(R8169_RX_BUF_SIZE, GFP_KERNEL, node); if (!data) return NULL; if (rtl8169_align(data) != data) { kfree(data); - data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node); if (!data) return NULL; } - mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { if (net_ratelimit()) @@ -6760,7 +6263,8 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, goto err_out; } - rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc); return data; err_out: @@ -6792,9 +6296,6 @@ static int rtl8169_rx_fill(struct rtl8169_private *tp) for (i = 0; i < NUM_RX_DESC; i++) { void *data; - if (tp->Rx_databuff[i]) - continue; - data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); if (!data) { rtl8169_make_unusable_by_asic(tp->RxDescArray + i); @@ -6811,14 +6312,12 @@ err_out: return -ENOMEM; } -static int rtl8169_init_ring(struct net_device *dev) +static int rtl8169_init_ring(struct rtl8169_private *tp) { - struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_init_ring_indexes(tp); - memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); - memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + memset(tp->tx_skb, 0, sizeof(tp->tx_skb)); + memset(tp->Rx_databuff, 0, sizeof(tp->Rx_databuff)); return rtl8169_rx_fill(tp); } @@ -6877,13 +6376,13 @@ static void rtl_reset_work(struct rtl8169_private *tp) rtl8169_hw_reset(tp); for (i = 0; i < NUM_RX_DESC; i++) - rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + rtl8169_mark_to_asic(tp->RxDescArray + i); rtl8169_tx_clear(tp); rtl8169_init_ring_indexes(tp); napi_enable(&tp->napi); - rtl_hw_start(dev); + rtl_hw_start(tp); netif_wake_queue(dev); rtl8169_check_link_status(dev, 
tp); } @@ -7015,18 +6514,6 @@ static int msdn_giant_send_check(struct sk_buff *skb) return ret; } -static inline __be16 get_protocol(struct sk_buff *skb) -{ - __be16 protocol; - - if (skb->protocol == htons(ETH_P_8021Q)) - protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; - else - protocol = skb->protocol; - - return protocol; -} - static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { @@ -7063,7 +6550,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return false; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[0] |= TD1_GTSENV4; break; @@ -7095,7 +6582,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return false; } - switch (get_protocol(skb)) { + switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): opts[1] |= TD1_IPv4_CS; ip_protocol = ip_hdr(skb)->protocol; @@ -7365,7 +6852,7 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, prefetch(data); skb = napi_alloc_skb(&tp->napi, pkt_size); if (skb) - memcpy(skb->data, data, pkt_size); + skb_copy_to_linear_data(skb, data, pkt_size); dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); return skb; @@ -7383,7 +6870,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget struct RxDesc *desc = tp->RxDescArray + entry; u32 status; - status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + status = le32_to_cpu(desc->opts1); if (status & DescOwn) break; @@ -7401,14 +6888,16 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget dev->stats.rx_length_errors++; if (status & RxCRC) dev->stats.rx_crc_errors++; - if (status & RxFOVF) { + /* RxFOVF is a reserved bit on later chip versions */ + if (tp->mac_version == RTL_GIGA_MAC_VER_01 && + status & RxFOVF) { rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); dev->stats.rx_fifo_errors++; - } - if ((status & (RxRUNT | RxCRC)) && - !(status & (RxRWT | RxFOVF)) && - (dev->features & NETIF_F_RXALL)) + } else if (status & (RxRUNT | RxCRC) && + !(status & RxRWT) && + dev->features & NETIF_F_RXALL) { goto process_pkt; + } } else { struct sk_buff *skb; dma_addr_t addr; @@ -7457,7 +6946,7 @@ process_pkt: } release_descriptor: desc->opts2 = 0; - rtl8169_mark_to_asic(desc, rx_buf_sz); + rtl8169_mark_to_asic(desc); } count = cur_rx - tp->cur_rx; @@ -7468,8 +6957,7 @@ release_descriptor: static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { - struct net_device *dev = dev_instance; - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = dev_instance; int handled = 0; u16 status; @@ -7480,7 +6968,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) handled = 1; rtl_irq_disable(tp); - napi_schedule(&tp->napi); + napi_schedule_irqoff(&tp->napi); } } return IRQ_RETVAL(handled); @@ -7631,7 +7119,7 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); /* Update counters before going down */ - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); rtl_lock_work(tp); clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); @@ -7641,7 +7129,7 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - pci_free_irq(pdev, 0, dev); + pci_free_irq(pdev, 0, tp); dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); @@ -7686,7 +7174,7 @@ static int rtl_open(struct net_device *dev) if (!tp->RxDescArray) goto err_free_tx_0; - retval = rtl8169_init_ring(dev); + retval = rtl8169_init_ring(tp); if 
(retval < 0) goto err_free_rx_1; @@ -7696,7 +7184,7 @@ static int rtl_open(struct net_device *dev) rtl_request_firmware(tp); - retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev, + retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, dev->name); if (retval < 0) goto err_release_fw_2; @@ -7709,13 +7197,11 @@ static int rtl_open(struct net_device *dev) rtl8169_init_phy(dev, tp); - __rtl8169_set_features(dev, dev->features); - rtl_pll_power_up(tp); - rtl_hw_start(dev); + rtl_hw_start(tp); - if (!rtl8169_init_counter_offsets(dev)) + if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); netif_start_queue(dev); @@ -7784,7 +7270,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) * from tally counters. */ if (pm_runtime_active(&pdev->dev)) - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); /* * Subtract values fetched during initalization. @@ -7880,7 +7366,7 @@ static int rtl8169_runtime_suspend(struct device *device) /* Update counters before going runtime suspend */ rtl8169_rx_missed(dev); - rtl8169_update_counters(dev); + rtl8169_update_counters(tp); return 0; } @@ -8020,9 +7506,7 @@ static const struct net_device_ops rtl_netdev_ops = { }; static const struct rtl_cfg_info { - void (*hw_start)(struct net_device *); - unsigned int region; - unsigned int align; + void (*hw_start)(struct rtl8169_private *tp); u16 event_slow; unsigned int has_gmii:1; const struct rtl_coalesce_info *coalesce_info; @@ -8030,8 +7514,6 @@ static const struct rtl_cfg_info { } rtl_cfg_infos [] = { [RTL_CFG_0] = { .hw_start = rtl_hw_start_8169, - .region = 1, - .align = 0, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8169, @@ -8039,8 +7521,6 @@ static const struct rtl_cfg_info { }, [RTL_CFG_1] = { .hw_start = rtl_hw_start_8168, - .region = 2, - .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8168_8136, @@ -8048,8 +7528,6 @@ static const struct rtl_cfg_info { }, [RTL_CFG_2] = { .hw_start = rtl_hw_start_8101, - .region = 2, - .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | PCSTimeout, .coalesce_info = rtl_coalesce_info_8168_8136, @@ -8125,20 +7603,10 @@ static void rtl_hw_init_8168ep(struct rtl8169_private *tp) static void rtl_hw_initialize(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_40: - case RTL_GIGA_MAC_VER_41: - case RTL_GIGA_MAC_VER_42: - case RTL_GIGA_MAC_VER_43: - case RTL_GIGA_MAC_VER_44: - case RTL_GIGA_MAC_VER_45: - case RTL_GIGA_MAC_VER_46: - case RTL_GIGA_MAC_VER_47: - case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: + case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_51: rtl_hw_init_8168ep(tp); break; default: @@ -8149,11 +7617,10 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; - const unsigned int region = cfg->region; struct rtl8169_private *tp; struct mii_if_info *mii; struct net_device *dev; - int chipset, i; + int chipset, region, i; int rc; if (netif_msg_drv(&debug)) { @@ -8188,43 +7655,41 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); if (rc < 0) { - netif_err(tp, probe, dev, "enable failure\n"); + dev_err(&pdev->dev, "enable failure\n"); return rc; } if (pcim_set_mwi(pdev) < 0) - netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable\n"); - /* make sure PCI base addr 1 is MMIO */ - if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { - netif_err(tp, probe, dev, - "region #%d not an MMIO resource, aborting\n", - region); + /* use first MMIO region */ + region = ffs(pci_select_bars(pdev, IORESOURCE_MEM)) - 1; + if (region < 0) { + dev_err(&pdev->dev, "no MMIO resource found\n"); return -ENODEV; } /* check for weird/broken PCI region reporting */ if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { - netif_err(tp, probe, dev, - "Invalid PCI region size(s), aborting\n"); + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); return -ENODEV; } rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME); if (rc < 0) { - netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); return rc; } tp->mmio_addr = pcim_iomap_table(pdev)[region]; if (!pci_is_pcie(pdev)) - netif_info(tp, probe, dev, "not PCI Express\n"); + dev_info(&pdev->dev, "not PCI Express\n"); /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, dev, cfg->default_ver); + rtl8169_get_mac_version(tp, cfg->default_ver); - tp->cp_cmd = 0; + tp->cp_cmd = RTL_R16(tp, CPlusCmd); if ((sizeof(dma_addr_t) > 4) && (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && @@ -8239,7 +7704,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); + dev_err(&pdev->dev, "DMA configuration failed\n"); return rc; } } @@ -8257,18 +7722,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); rtl_init_mdio_ops(tp); - rtl_init_pll_power_ops(tp); rtl_init_jumbo_ops(tp); - rtl_init_csi_ops(tp); rtl8169_print_mac_version(tp); chipset = tp->mac_version; - tp->txd_version = rtl_chip_infos[chipset].txd_version; rc = rtl_alloc_irq(tp); if (rc < 0) { - netif_err(tp, probe, dev, "Can't allocate interrupt\n"); + dev_err(&pdev->dev, "Can't allocate interrupt\n"); return rc; } @@ -8296,29 +7758,18 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u64_stats_init(&tp->tx_stats.syncp); /* Get MAC address */ - if (tp->mac_version == RTL_GIGA_MAC_VER_35 || - tp->mac_version == RTL_GIGA_MAC_VER_36 || - tp->mac_version == RTL_GIGA_MAC_VER_37 || - tp->mac_version == RTL_GIGA_MAC_VER_38 || - tp->mac_version == RTL_GIGA_MAC_VER_40 || - tp->mac_version == RTL_GIGA_MAC_VER_41 || - tp->mac_version == RTL_GIGA_MAC_VER_42 || - tp->mac_version == RTL_GIGA_MAC_VER_43 || - tp->mac_version == RTL_GIGA_MAC_VER_44 || - tp->mac_version == RTL_GIGA_MAC_VER_45 || - tp->mac_version == RTL_GIGA_MAC_VER_46 || - tp->mac_version == RTL_GIGA_MAC_VER_47 || - tp->mac_version == RTL_GIGA_MAC_VER_48 || - tp->mac_version == RTL_GIGA_MAC_VER_49 || - tp->mac_version == RTL_GIGA_MAC_VER_50 || - tp->mac_version == RTL_GIGA_MAC_VER_51) { - u16 mac_addr[3]; - + switch (tp->mac_version) { + u8 mac_addr[ETH_ALEN] __aligned(4); + case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); - *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); - if (is_valid_ether_addr((u8 *)mac_addr)) - rtl_rar_set(tp, (u8 *)mac_addr); + if (is_valid_ether_addr(mac_addr)) + rtl_rar_set(tp, mac_addr); + break; + default: + break; } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); @@ -8326,7 +7777,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->ethtool_ops = &rtl8169_ethtool_ops; dev->watchdog_timeo = RTL8169_TX_TIMEOUT; - netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); /* don't enable SG, IP_CSUM and TSO by default - it might not work * properly for all devices */ @@ -8349,13 +7800,17 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; - if (tp->txd_version == RTL_TD_0) + switch (rtl_chip_infos[chipset].txd_version) { + case RTL_TD_0: tp->tso_csum = rtl8169_tso_csum_v1; - else if (tp->txd_version == RTL_TD_1) { + break; + case RTL_TD_1: tp->tso_csum = rtl8169_tso_csum_v2; dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; - } else + break; + default: WARN_ON_ONCE(1); + } dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; @@ -8368,9 +7823,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->event_slow = cfg->event_slow; tp->coalesce_info = cfg->coalesce_info; - tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? - ~(RxBOVF | RxFOVF) : ~0; - timer_setup(&tp->timer, rtl8169_phy_timer, 0); tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; @@ -8387,15 +7839,15 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc < 0) return rc; - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", - rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr, - (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff), + netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", + rtl_chip_infos[chipset].name, dev->dev_addr, + (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), pci_irq_vector(pdev, 0)); if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " "tx checksumming: %s]\n", rtl_chip_infos[chipset].jumbo_max, - rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko"); + tp->mac_version <= RTL_GIGA_MAC_VER_06 ? 
"ok" : "ko"); } if (r8168_check_dash(tp)) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b6b90a6314e3..e9007b613f17 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -442,12 +442,33 @@ static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, int enum_index) { - iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->tsu_addr + offset); } static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) { - return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->tsu_addr + offset); +} + +static void sh_eth_soft_swap(char *src, int len) +{ +#ifdef __LITTLE_ENDIAN + u32 *p = (u32 *)src; + u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32)); + + for (; p < maxp; p++) + *p = swab32(*p); +#endif } static void sh_eth_select_mii(struct net_device *ndev) @@ -456,6 +477,9 @@ static void sh_eth_select_mii(struct net_device *ndev) u32 value; switch (mdp->phy_interface) { + case PHY_INTERFACE_MODE_RGMII ... PHY_INTERFACE_MODE_RGMII_TXID: + value = 0x3; + break; case PHY_INTERFACE_MODE_GMII: value = 0x2; break; @@ -693,7 +717,7 @@ static struct sh_eth_cpu_data rcar_gen1_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, @@ -725,7 +749,7 @@ static struct sh_eth_cpu_data rcar_gen2_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, .fdr_value = 0x00000f0f, @@ -740,6 +764,49 @@ static struct sh_eth_cpu_data rcar_gen2_data = { .rmiimode = 1, .magic = 1, }; + +/* R8A77980 */ +static struct sh_eth_cpu_data r8a77980_data = { + .soft_reset = sh_eth_soft_reset_gether, + + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether, + + .register_type = SH_ETH_REG_GIGABIT, + + .edtrr_trns = EDTRR_TRNS_GETHER, + .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP | + ECSIPR_MPDIP, + .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP | + EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP | + EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP | + EESIPR_RMAFIP | EESIPR_RRFIP | + EESIPR_RTLFIP | EESIPR_RTSFIP | + EESIPR_PREIP | EESIPR_CERFIP, + + .tx_check = EESR_FTC | EESR_CD | EESR_TRO, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | + EESR_RFE | EESR_RDE | EESR_RFRMER | + EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x0000070f, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .nbst = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, + .xdfar_rw = 1, + .hw_checksum = 1, + .select_mii = 1, + .magic = 1, + .cexcr = 1, +}; #endif /* CONFIG_OF */ static void sh_eth_set_rate_sh7724(struct 
net_device *ndev) @@ -775,7 +842,7 @@ static struct sh_eth_cpu_data sh7724_data = { EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, @@ -820,7 +887,7 @@ static struct sh_eth_cpu_data sh7757_data = { EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP, - .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO, + .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO, .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE, @@ -1421,8 +1488,13 @@ static int sh_eth_dev_init(struct net_device *ndev) sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); + /* DMA transfer burst mode */ + if (mdp->cd->nbst) + sh_eth_modify(ndev, EDMR, EDMR_NBST, EDMR_NBST); + + /* Burst cycle count upper-limit */ if (mdp->cd->bculr) - sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ + sh_eth_write(ndev, 0x800, BCULR); sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); @@ -2610,12 +2682,6 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu) } /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ -static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, - int entry) -{ - return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4); -} - static u32 sh_eth_tsu_get_post_mask(int entry) { return 0x0f << (28 - ((entry % 8) * 4)); @@ -2630,27 +2696,25 @@ static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; u32 tmp; - void *reg_offset; - reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); - tmp = ioread32(reg_offset); - iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset); + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); } static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev, int entry) { struct sh_eth_private *mdp = netdev_priv(ndev); + int reg = TSU_POST1 + entry / 8; u32 post_mask, ref_mask, tmp; - void *reg_offset; - reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry); post_mask = sh_eth_tsu_get_post_mask(entry); ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; - tmp = ioread32(reg_offset); - iowrite32(tmp & ~post_mask, reg_offset); + tmp = sh_eth_tsu_read(mdp, reg); + sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); /* If other port enables, the function returns "true" */ return tmp & ref_mask; @@ -3023,15 +3087,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, pdev->name, pdev->id); /* register MDIO bus */ - if (dev->of_node) { - ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); - } else { - if (pd->phy_irq > 0) - mdp->mii_bus->irq[pd->phy] = pd->phy_irq; - - ret = mdiobus_register(mdp->mii_bus); - } + if (pd->phy_irq > 0) + mdp->mii_bus->irq[pd->phy] = pd->phy_irq; + ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); if (ret) goto out_free_bus; @@ -3130,6 +3189,7 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, + { .compatible = "renesas,gether-r8a77980", .data = 
&r8a77980_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 1bf930d4a1e5..726c55a82dd7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -184,6 +184,7 @@ enum GECMR_BIT { /* EDMR */ enum DMAC_M_BIT { + EDMR_NBST = 0x80, EDMR_EL = 0x40, /* Litte endian */ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, EDMR_SRST_GETHER = 0x03, @@ -242,7 +243,7 @@ enum EESR_BIT { EESR_CND = 0x00000800, EESR_DLC = 0x00000400, EESR_CD = 0x00000200, - EESR_RTO = 0x00000100, + EESR_TRO = 0x00000100, EESR_RMAF = 0x00000080, EESR_CEEF = 0x00000040, EESR_CELF = 0x00000020, @@ -262,7 +263,7 @@ enum EESR_BIT { EESR_CERF) /* Recv frame CRC error */ #define DEFAULT_TX_CHECK (EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \ - EESR_RTO) + EESR_TRO) #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \ EESR_RDE | EESR_RFRMER | EESR_ADE | \ EESR_TFE | EESR_TDE) @@ -498,20 +499,21 @@ struct sh_eth_cpu_data { /* hardware features */ unsigned long irq_flags; /* IRQ configuration flags */ - unsigned no_psr:1; /* EtherC DO NOT have PSR */ - unsigned apr:1; /* EtherC have APR */ - unsigned mpr:1; /* EtherC have MPR */ - unsigned tpauser:1; /* EtherC have TPAUSER */ - unsigned bculr:1; /* EtherC have BCULR */ - unsigned tsu:1; /* EtherC have TSU */ - unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ - unsigned rpadir:1; /* E-DMAC have RPADIR */ - unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ - unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */ + unsigned no_psr:1; /* EtherC DOES NOT have PSR */ + unsigned apr:1; /* EtherC has APR */ + unsigned mpr:1; /* EtherC has MPR */ + unsigned tpauser:1; /* EtherC has TPAUSER */ + unsigned bculr:1; /* EtherC has BCULR */ + unsigned tsu:1; /* EtherC has TSU */ + unsigned hw_swap:1; /* E-DMAC has DE bit in EDMR */ + unsigned nbst:1; /* E-DMAC has NBST bit in EDMR */ + unsigned rpadir:1; /* E-DMAC has RPADIR */ + unsigned no_trimd:1; /* E-DMAC DOES NOT have TRIMD */ + unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */ unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */ unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */ unsigned hw_checksum:1; /* E-DMAC has CSMR */ - unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */ + unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */ @@ -558,18 +560,6 @@ struct sh_eth_private { unsigned wol_enabled:1; }; -static inline void sh_eth_soft_swap(char *src, int len) -{ -#ifdef __LITTLE_ENDIAN__ - u32 *p = (u32 *)src; - u32 *maxp; - maxp = p + ((len + sizeof(u32) - 1) / sizeof(u32)); - - for (; p < maxp; p++) - *p = swab32(*p); -#endif -} - static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 056cb6093630..aeafdb9ac015 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1632,6 +1632,9 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if 
(netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_add) return -EOPNOTSUPP; @@ -1647,6 +1650,9 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (!wops->port_obj_vlan_del) return -EOPNOTSUPP; return wops->port_obj_vlan_del(rocker_port, vlan); @@ -2738,6 +2744,8 @@ static void rocker_switchdev_event_work(struct work_struct *work) switch (switchdev_work->event) { case SWITCHDEV_FDB_ADD_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = rocker_world_port_fdb_add(rocker_port, fdb_info); if (err) { netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); @@ -2747,6 +2755,8 @@ static void rocker_switchdev_event_work(struct work_struct *work) break; case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = &switchdev_work->fdb_info; + if (!fdb_info->added_by_user) + break; err = rocker_world_port_fdb_del(rocker_port, fdb_info); if (err) netdev_dbg(rocker_port->dev, "fdb add failed err=%d\n", err); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 661828e8fdcf..ad4a354ce570 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1550,6 +1550,38 @@ static int efx_probe_interrupts(struct efx_nic *efx) return 0; } +#if defined(CONFIG_SMP) +static void efx_set_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + unsigned int cpu; + + efx_for_each_channel(channel, efx) { + cpu = cpumask_local_spread(channel->channel, + pcibus_to_node(efx->pci_dev->bus)); + irq_set_affinity_hint(channel->irq, cpumask_of(cpu)); + } +} + +static void efx_clear_interrupt_affinity(struct efx_nic *efx) +{ + struct efx_channel *channel; + + efx_for_each_channel(channel, efx) + irq_set_affinity_hint(channel->irq, NULL); +} +#else +static void +efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} + +static void +efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused))) +{ +} +#endif /* CONFIG_SMP */ + static int efx_soft_enable_interrupts(struct efx_nic *efx) { struct efx_channel *channel, *end_channel; @@ -3307,6 +3339,7 @@ static void efx_pci_remove_main(struct efx_nic *efx) cancel_work_sync(&efx->reset_work); efx_disable_interrupts(efx); + efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); efx_fini_port(efx); efx->type->fini(efx); @@ -3456,6 +3489,8 @@ static int efx_pci_probe_main(struct efx_nic *efx) rc = efx_nic_init_interrupt(efx); if (rc) goto fail5; + + efx_set_interrupt_affinity(efx); rc = efx_enable_interrupts(efx); if (rc) goto fail6; @@ -3463,6 +3498,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) return 0; fail6: + efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); fail5: efx_fini_port(efx); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index cece961f2e82..c3ad564ac4c0 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -435,17 +435,18 @@ static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, } while (1); } -/* Remove buffers put into a tx_queue. None of the buffers must have - * an skb attached. +/* Remove buffers put into a tx_queue for the current packet. + * None of the buffers must have an skb attached. 
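
The new efx_set_interrupt_affinity() above hints each channel's IRQ onto a CPU chosen by cpumask_local_spread(), which enumerates the CPUs of the device's NUMA node first and falls back to remote CPUs once the node is exhausted, so the mapping stays valid even with more channels than local CPUs; the matching clear function must run before the IRQs are freed. The same pattern in isolation, with hypothetical irqs[]/nvec arguments:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

/* Sketch only: pin nvec vectors onto CPUs near NUMA node @node. */
static void spread_irq_affinity(const int *irqs, unsigned int nvec, int node)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		unsigned int cpu = cpumask_local_spread(i, node);

		irq_set_affinity_hint(irqs[i], cpumask_of(cpu));
	}
}

static void clear_irq_affinity(const int *irqs, unsigned int nvec)
{
	unsigned int i;

	/* Hints must be dropped before free_irq() on teardown. */
	for (i = 0; i < nvec; i++)
		irq_set_affinity_hint(irqs[i], NULL);
}
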
*/ -static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) +static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, + unsigned int insert_count) { struct efx_tx_buffer *buffer; unsigned int bytes_compl = 0; unsigned int pkts_compl = 0; /* Work backwards until we hit the original insert pointer value */ - while (tx_queue->insert_count != tx_queue->write_count) { + while (tx_queue->insert_count != insert_count) { --tx_queue->insert_count; buffer = __efx_tx_queue_get_insert_buffer(tx_queue); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); @@ -504,6 +505,8 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, */ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { + unsigned int old_insert_count = tx_queue->insert_count; + bool xmit_more = skb->xmit_more; bool data_mapped = false; unsigned int segments; unsigned int skb_len; @@ -553,8 +556,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Update BQL */ netdev_tx_sent_queue(tx_queue->core_txq, skb_len); + efx_tx_maybe_stop_queue(tx_queue); + /* Pass off to hardware */ - if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); /* There could be packets left on the partner queue if those @@ -577,14 +582,26 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) tx_queue->tx_packets++; } - efx_tx_maybe_stop_queue(tx_queue); - return NETDEV_TX_OK; err: - efx_enqueue_unwind(tx_queue); + efx_enqueue_unwind(tx_queue, old_insert_count); dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue or a partner queue then we need to push here to get the + * previous packets out. 
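
Two details of the efx_enqueue_skb() rework above are worth spelling out. First, skb->xmit_more is latched into a local before the buffers are mapped: on the error path the skb is freed, after which the flag can no longer be read, yet it still decides whether the doorbell must be rung to flush earlier deferred packets. Second, the unwind now rolls back only to the insert count saved at entry rather than to write_count, so a failed packet no longer throws away earlier packets that were queued with xmit_more but not yet pushed to hardware. A condensed skeleton of the same shape, with hypothetical foo_* helpers (on this kernel generation the flag lives in the skb; later kernels moved it behind netdev_xmit_more()):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_txq {
	struct netdev_queue *core_txq;
	unsigned int insert_count;
};

int foo_map_skb(struct foo_txq *txq, struct sk_buff *skb);	/* fills ring */
void foo_unwind(struct foo_txq *txq, unsigned int old_insert);	/* rolls back */
void foo_push(struct foo_txq *txq);				/* doorbell */

static netdev_tx_t foo_enqueue_skb(struct foo_txq *txq, struct sk_buff *skb)
{
	unsigned int old_insert = txq->insert_count;
	bool xmit_more = skb->xmit_more;	/* latch before skb can die */

	if (foo_map_skb(txq, skb))
		goto err;

	netdev_tx_sent_queue(txq->core_txq, skb->len);

	/* Ring the doorbell only at the end of a batch. */
	if (!xmit_more || netif_xmit_stopped(txq->core_txq))
		foo_push(txq);

	return NETDEV_TX_OK;

err:
	foo_unwind(txq, old_insert);		/* this packet only */
	dev_kfree_skb_any(skb);
	if (!xmit_more)
		foo_push(txq);		/* flush packets deferred earlier */
	return NETDEV_TX_OK;
}
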
+ */ + if (!xmit_more) { + struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); + + if (txq2->xmit_more_available) + efx_nic_push_buffers(txq2); + + efx_nic_push_buffers(tx_queue); + } + return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig index 6bcfe27fc560..b80048ca82a0 100644 --- a/drivers/net/ethernet/socionext/Kconfig +++ b/drivers/net/ethernet/socionext/Kconfig @@ -14,6 +14,8 @@ if NET_VENDOR_SOCIONEXT config SNI_AVE tristate "Socionext AVE ethernet support" depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF + depends on HAS_IOMEM + select MFD_SYSCON select PHYLIB ---help--- Driver for gigabit ethernet MACs, called AVE, in the diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 59fbf74dcada..ce8071fc90c4 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1057,7 +1057,8 @@ static int netsec_netdev_load_microcode(struct netsec_priv *priv) return 0; } -static int netsec_reset_hardware(struct netsec_priv *priv) +static int netsec_reset_hardware(struct netsec_priv *priv, + bool load_ucode) { u32 value; int err; @@ -1102,11 +1103,14 @@ static int netsec_reset_hardware(struct netsec_priv *priv) netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, 1 << NETSEC_REG_DESC_ENDIAN); - err = netsec_netdev_load_microcode(priv); - if (err) { - netif_err(priv, probe, priv->ndev, - "%s: failed to load microcode (%d)\n", __func__, err); - return err; + if (load_ucode) { + err = netsec_netdev_load_microcode(priv); + if (err) { + netif_err(priv, probe, priv->ndev, + "%s: failed to load microcode (%d)\n", + __func__, err); + return err; + } } /* start DMA engines */ @@ -1313,8 +1317,8 @@ static int netsec_netdev_open(struct net_device *ndev) napi_enable(&priv->napi); netif_start_queue(ndev); - /* Enable RX intr. */ - netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX); + /* Enable TX+RX intr. 
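
netsec_reset_hardware() above gains a load_ucode flag so that the stop path can reuse the same quiesce-and-reset sequence without re-downloading microcode, which only needs to happen once at netdev init. Reduced to its shape, with hypothetical foo_* internals:

#include <linux/types.h>

struct foo_priv;
int foo_soft_reset(struct foo_priv *priv);	/* stop + reset DMA engines */
int foo_load_microcode(struct foo_priv *priv);
int foo_start_dma(struct foo_priv *priv);

/* Single reset helper; callers pass load_ucode=true at init time and
 * false from ndo_stop, mirroring the hunks above.
 */
static int foo_reset_hardware(struct foo_priv *priv, bool load_ucode)
{
	int err = foo_soft_reset(priv);

	if (err)
		return err;

	if (load_ucode) {
		err = foo_load_microcode(priv);
		if (err)
			return err;
	}

	return foo_start_dma(priv);
}
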
*/ + netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX); return 0; err3: @@ -1328,6 +1332,7 @@ err1: static int netsec_netdev_stop(struct net_device *ndev) { + int ret; struct netsec_priv *priv = netdev_priv(ndev); netif_stop_queue(priv->ndev); @@ -1343,12 +1348,14 @@ static int netsec_netdev_stop(struct net_device *ndev) netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + ret = netsec_reset_hardware(priv, false); + phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); pm_runtime_put_sync(priv->dev); - return 0; + return ret; } static int netsec_netdev_init(struct net_device *ndev) @@ -1364,7 +1371,7 @@ static int netsec_netdev_init(struct net_device *ndev) if (ret) goto err1; - ret = netsec_reset_hardware(priv); + ret = netsec_reset_hardware(priv, true); if (ret) goto err2; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 0b3b7a460641..f7ecceeb1e28 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> +#include <linux/mfd/syscon.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -18,6 +19,7 @@ #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/phy.h> +#include <linux/regmap.h> #include <linux/reset.h> #include <linux/types.h> #include <linux/u64_stats_sync.h> @@ -197,8 +199,16 @@ #define AVE_INTM_COUNT 20 #define AVE_FORCE_TXINTCNT 1 +/* SG */ +#define SG_ETPINMODE 0x540 +#define SG_ETPINMODE_EXTPHY BIT(1) /* for LD11 */ +#define SG_ETPINMODE_RMII(ins) BIT(ins) + #define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) +#define AVE_MAX_CLKS 4 +#define AVE_MAX_RSTS 2 + enum desc_id { AVE_DESCID_RX, AVE_DESCID_TX, @@ -225,10 +235,6 @@ struct ave_desc_info { struct ave_desc *desc; /* skb info related descriptor */ }; -struct ave_soc_data { - bool is_desc_64bit; -}; - struct ave_stats { struct u64_stats_sync syncp; u64 packets; @@ -245,11 +251,16 @@ struct ave_private { int phy_id; unsigned int desc_size; u32 msg_enable; - struct clk *clk; - struct reset_control *rst; + int nclks; + struct clk *clk[AVE_MAX_CLKS]; + int nrsts; + struct reset_control *rst[AVE_MAX_RSTS]; phy_interface_t phy_mode; struct phy_device *phydev; struct mii_bus *mdio; + struct regmap *regmap; + unsigned int pinmode_mask; + unsigned int pinmode_val; /* stats */ struct ave_stats stats_rx; @@ -272,6 +283,14 @@ struct ave_private { const struct ave_soc_data *data; }; +struct ave_soc_data { + bool is_desc_64bit; + const char *clock_names[AVE_MAX_CLKS]; + const char *reset_names[AVE_MAX_RSTS]; + int (*get_pinmode)(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg); +}; + static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, int offset) { @@ -1153,20 +1172,30 @@ static int ave_init(struct net_device *ndev) struct device_node *np = dev->of_node; struct device_node *mdio_np; struct phy_device *phydev; - int ret; + int nc, nr, ret; /* enable clk because of hw access until ndo_open */ - ret = clk_prepare_enable(priv->clk); - if (ret) { - dev_err(dev, "can't enable clock\n"); - return ret; + for (nc = 0; nc < priv->nclks; nc++) { + ret = clk_prepare_enable(priv->clk[nc]); + if (ret) { + dev_err(dev, "can't enable clock\n"); + goto out_clk_disable; + } } - ret = reset_control_deassert(priv->rst); - if (ret) { - dev_err(dev, "can't deassert reset\n"); - goto out_clk_disable; + + for (nr = 0; 
nr < priv->nrsts; nr++) { + ret = reset_control_deassert(priv->rst[nr]); + if (ret) { + dev_err(dev, "can't deassert reset\n"); + goto out_reset_assert; + } } + ret = regmap_update_bits(priv->regmap, SG_ETPINMODE, + priv->pinmode_mask, priv->pinmode_val); + if (ret) + return ret; + ave_global_reset(ndev); mdio_np = of_get_child_by_name(np, "mdio"); @@ -1207,9 +1236,11 @@ static int ave_init(struct net_device *ndev) out_mdio_unregister: mdiobus_unregister(priv->mdio); out_reset_assert: - reset_control_assert(priv->rst); + while (--nr >= 0) + reset_control_assert(priv->rst[nr]); out_clk_disable: - clk_disable_unprepare(priv->clk); + while (--nc >= 0) + clk_disable_unprepare(priv->clk[nc]); return ret; } @@ -1217,13 +1248,16 @@ out_clk_disable: static void ave_uninit(struct net_device *ndev) { struct ave_private *priv = netdev_priv(ndev); + int i; phy_disconnect(priv->phydev); mdiobus_unregister(priv->mdio); /* disable clk because of hw access after ndo_stop */ - reset_control_assert(priv->rst); - clk_disable_unprepare(priv->clk); + for (i = 0; i < priv->nrsts; i++) + reset_control_assert(priv->rst[i]); + for (i = 0; i < priv->nclks; i++) + clk_disable_unprepare(priv->clk[i]); } static int ave_open(struct net_device *ndev) @@ -1520,6 +1554,7 @@ static int ave_probe(struct platform_device *pdev) const struct ave_soc_data *data; struct device *dev = &pdev->dev; char buf[ETHTOOL_FWVERS_LEN]; + struct of_phandle_args args; phy_interface_t phy_mode; struct ave_private *priv; struct net_device *ndev; @@ -1527,8 +1562,9 @@ static int ave_probe(struct platform_device *pdev) struct resource *res; const void *mac_addr; void __iomem *base; + const char *name; + int i, irq, ret; u64 dma_mask; - int irq, ret; u32 ave_id; data = of_device_get_match_data(dev); @@ -1541,12 +1577,6 @@ static int ave_probe(struct platform_device *pdev) dev_err(dev, "phy-mode not found\n"); return -EINVAL; } - if ((!phy_interface_mode_is_rgmii(phy_mode)) && - phy_mode != PHY_INTERFACE_MODE_RMII && - phy_mode != PHY_INTERFACE_MODE_MII) { - dev_err(dev, "phy-mode is invalid\n"); - return -EINVAL; - } irq = platform_get_irq(pdev, 0); if (irq < 0) { @@ -1614,15 +1644,47 @@ static int ave_probe(struct platform_device *pdev) u64_stats_init(&priv->stats_tx.syncp); u64_stats_init(&priv->stats_rx.syncp); - priv->clk = devm_clk_get(dev, NULL); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto out_free_netdev; + for (i = 0; i < AVE_MAX_CLKS; i++) { + name = priv->data->clock_names[i]; + if (!name) + break; + priv->clk[i] = devm_clk_get(dev, name); + if (IS_ERR(priv->clk[i])) { + ret = PTR_ERR(priv->clk[i]); + goto out_free_netdev; + } + priv->nclks++; + } + + for (i = 0; i < AVE_MAX_RSTS; i++) { + name = priv->data->reset_names[i]; + if (!name) + break; + priv->rst[i] = devm_reset_control_get_shared(dev, name); + if (IS_ERR(priv->rst[i])) { + ret = PTR_ERR(priv->rst[i]); + goto out_free_netdev; + } + priv->nrsts++; } - priv->rst = devm_reset_control_get_optional_shared(dev, NULL); - if (IS_ERR(priv->rst)) { - ret = PTR_ERR(priv->rst); + ret = of_parse_phandle_with_fixed_args(np, + "socionext,syscon-phy-mode", + 1, 0, &args); + if (ret) { + netdev_err(ndev, "can't get syscon-phy-mode property\n"); + goto out_free_netdev; + } + priv->regmap = syscon_node_to_regmap(args.np); + of_node_put(args.np); + if (IS_ERR(priv->regmap)) { + netdev_err(ndev, "can't map syscon-phy-mode\n"); + ret = PTR_ERR(priv->regmap); + goto out_free_netdev; + } + ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]); + if (ret) { + netdev_err(ndev, 
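
ave_init() above enables a whole array of clocks and then deasserts an array of resets, unwinding in reverse with while (--n >= 0) loops when anything fails, while ave_uninit() tears both down unconditionally. The enable-with-unwind half, extracted into a standalone helper (same APIs, hypothetical wrapper); the reset half is symmetric with reset_control_deassert()/reset_control_assert(), and recent kernels also offer the clk_bulk_*() helpers for this idiom:

#include <linux/clk.h>
#include <linux/device.h>

/* Enable clks[0..nclks-1] in order; on error, disable the ones that
 * made it, newest first, and return the failure.
 */
static int foo_enable_clks(struct device *dev, struct clk **clks, int nclks)
{
	int i, ret;

	for (i = 0; i < nclks; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret) {
			dev_err(dev, "can't enable clock %d\n", i);
			goto unwind;
		}
	}

	return 0;

unwind:
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);
	return ret;
}
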
"invalid phy-mode setting\n"); goto out_free_netdev; } @@ -1685,24 +1747,148 @@ static int ave_remove(struct platform_device *pdev) return 0; } +static int ave_pro4_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld11_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_INTERNAL: + priv->pinmode_val = 0; + break; + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_EXTPHY | SG_ETPINMODE_RMII(0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_ld20_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 0) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(0); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(0); + break; + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ave_pxs3_get_pinmode(struct ave_private *priv, + phy_interface_t phy_mode, u32 arg) +{ + if (arg > 1) + return -EINVAL; + + priv->pinmode_mask = SG_ETPINMODE_RMII(arg); + + switch (phy_mode) { + case PHY_INTERFACE_MODE_RMII: + priv->pinmode_val = SG_ETPINMODE_RMII(arg); + break; + case PHY_INTERFACE_MODE_RGMII: + priv->pinmode_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + static const struct ave_soc_data ave_pro4_data = { .is_desc_64bit = false, + .clock_names = { + "gio", "ether", "ether-gb", "ether-phy", + }, + .reset_names = { + "gio", "ether", + }, + .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_pxs2_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pro4_get_pinmode, }; static const struct ave_soc_data ave_ld11_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_ld11_get_pinmode, }; static const struct ave_soc_data ave_ld20_data = { .is_desc_64bit = true, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_ld20_get_pinmode, }; static const struct ave_soc_data ave_pxs3_data = { .is_desc_64bit = false, + .clock_names = { + "ether", + }, + .reset_names = { + "ether", + }, + .get_pinmode = ave_pxs3_get_pinmode, }; static const struct of_device_id of_ave_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 972e4ef6d414..68e9e2640c62 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -4,7 +4,8 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ - dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o $(stmmac-y) + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \ + stmmac_tc.o $(stmmac-y) # Ordering 
matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index e93c40b4631e..b9c9003060c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -24,7 +24,7 @@ #include "stmmac.h" -static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); @@ -51,8 +51,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = bmax; /* do not close the descriptor and do not set own bit */ - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, - 0, false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false, skb->len); while (len != 0) { tx_q->tx_skbuff[entry] = NULL; @@ -68,9 +68,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = bmax; - priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, - STMMAC_CHAIN_MODE, 1, - false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum, + STMMAC_CHAIN_MODE, 1, false, skb->len); len -= bmax; i++; } else { @@ -83,9 +82,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].buf = des2; tx_q->tx_skbuff_dma[entry].len = len; /* last descriptor can be set now */ - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_CHAIN_MODE, 1, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_CHAIN_MODE, 1, true, skb->len); len = 0; } } @@ -95,7 +93,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return entry; } -static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +static unsigned int is_jumbo_frm(int len, int enh_desc) { unsigned int ret = 0; @@ -107,7 +105,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, +static void init_dma_chain(void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc) { /* @@ -137,7 +135,7 @@ static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, } } -static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +static void refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr; struct stmmac_priv *priv = rx_q->priv_data; @@ -153,7 +151,7 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) sizeof(struct dma_desc))); } -static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +static void clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; struct stmmac_priv *priv = tx_q->priv_data; @@ -171,9 +169,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) } const struct stmmac_mode_ops chain_mode_ops = { - .init = stmmac_init_dma_chain, - .is_jumbo_frm = stmmac_is_jumbo_frm, - .jumbo_frm = stmmac_jumbo_frm, - .refill_desc3 = stmmac_refill_desc3, - .clean_desc3 = stmmac_clean_desc3, + .init = init_dma_chain, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .clean_desc3 = 
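
The chain_mode.c changes above are mechanical but show the new calling convention: the mode callbacks lose their stmmac_ prefix (they are only ever reached through the ops table), and direct priv->hw->desc->prepare_tx_desc(...) dereferences become stmmac_prepare_tx_desc(priv, ...). The wrappers live in the new hwif layer (hwif.o in the Makefile hunk); in the tree they are macro-generated, but each one expands to roughly this guarded indirection (illustrative only, not the in-tree macro):

static inline void stmmac_prepare_tx_desc(struct stmmac_priv *priv,
					  struct dma_desc *p, int is_fs,
					  int len, bool csum_flag, int mode,
					  bool tx_own, bool ls,
					  unsigned int tot_pkt_len)
{
	/* Dispatch through the per-core ops table, tolerating cores
	 * that do not provide the callback.
	 */
	if (priv->hw->desc && priv->hw->desc->prepare_tx_desc)
		priv->hw->desc->prepare_tx_desc(p, is_fs, len, csum_flag,
						mode, tx_own, ls, tot_pkt_len);
}
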
clean_desc3, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index ad2388aee463..78fd0f8b8e81 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -32,12 +32,14 @@ #endif #include "descs.h" +#include "hwif.h" #include "mmc.h" /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 #define DWMAC_CORE_4_00 0x40 +#define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 #define DWMAC_CORE_5_10 0x51 #define STMMAC_CHAN0 0 /* Always supported and default for all chips */ @@ -344,6 +346,8 @@ struct dma_features { /* TX and RX number of queues */ unsigned int number_rx_queues; unsigned int number_tx_queues; + /* PPS output */ + unsigned int pps_out_num; /* Alternate (enhanced) DESC mode */ unsigned int enh_desc; /* TX and RX FIFO sizes */ @@ -351,6 +355,10 @@ struct dma_features { unsigned int rx_fifo_size; /* Automotive Safety Package */ unsigned int asp; + /* RX Parser */ + unsigned int frpsel; + unsigned int frpbs; + unsigned int frpes; }; /* GMAC TX FIFO is 8K, Rx FIFO is 16K */ @@ -377,197 +385,11 @@ struct dma_features { #define JUMBO_LEN 9000 -/* Descriptors helpers */ -struct stmmac_desc_ops { - /* DMA RX descriptor ring initialization */ - void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode, - int end); - /* DMA TX descriptor ring initialization */ - void (*init_tx_desc) (struct dma_desc *p, int mode, int end); - - /* Invoked by the xmit function to prepare the tx descriptor */ - void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, - bool csum_flag, int mode, bool tx_own, - bool ls, unsigned int tot_pkt_len); - void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, - int len2, bool tx_own, bool ls, - unsigned int tcphdrlen, - unsigned int tcppayloadlen); - /* Set/get the owner of the descriptor */ - void (*set_tx_owner) (struct dma_desc *p); - int (*get_tx_owner) (struct dma_desc *p); - /* Clean the tx descriptor as soon as the tx irq is received */ - void (*release_tx_desc) (struct dma_desc *p, int mode); - /* Clear interrupt on tx frame completion. 
When this bit is - * set an interrupt happens as soon as the frame is transmitted */ - void (*set_tx_ic)(struct dma_desc *p); - /* Last tx segment reports the transmit status */ - int (*get_tx_ls) (struct dma_desc *p); - /* Return the transmit status looking at the TDES1 */ - int (*tx_status) (void *data, struct stmmac_extra_stats *x, - struct dma_desc *p, void __iomem *ioaddr); - /* Get the buffer size from the descriptor */ - int (*get_tx_len) (struct dma_desc *p); - /* Handle extra events on specific interrupts hw dependent */ - void (*set_rx_owner) (struct dma_desc *p); - /* Get the receive frame size */ - int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); - /* Return the reception status looking at the RDES1 */ - int (*rx_status) (void *data, struct stmmac_extra_stats *x, - struct dma_desc *p); - void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x, - struct dma_extended_desc *p); - /* Set tx timestamp enable bit */ - void (*enable_tx_timestamp) (struct dma_desc *p); - /* get tx timestamp status */ - int (*get_tx_timestamp_status) (struct dma_desc *p); - /* get timestamp value */ - u64(*get_timestamp) (void *desc, u32 ats); - /* get rx timestamp status */ - int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); - /* Display ring */ - void (*display_ring)(void *head, unsigned int size, bool rx); - /* set MSS via context descriptor */ - void (*set_mss)(struct dma_desc *p, unsigned int mss); -}; - extern const struct stmmac_desc_ops enh_desc_ops; extern const struct stmmac_desc_ops ndesc_ops; -/* Specific DMA helpers */ -struct stmmac_dma_ops { - /* DMA core initialization */ - int (*reset)(void __iomem *ioaddr); - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds); - void (*init_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, u32 chan); - void (*init_rx_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_rx_phy, u32 chan); - void (*init_tx_chan)(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx_phy, u32 chan); - /* Configure the AXI Bus Mode Register */ - void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); - /* Dump DMA registers */ - void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); - /* Set tx/rx threshold in the csr6 register - * An invalid value enables the store-and-forward mode */ - void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, - int rxfifosz); - void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz, u8 qmode); - void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz, u8 qmode); - /* To track extra statistic (if supported) */ - void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, - void __iomem *ioaddr); - void (*enable_dma_transmission) (void __iomem *ioaddr); - void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); - void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); - void (*start_tx)(void __iomem *ioaddr, u32 chan); - void (*stop_tx)(void __iomem *ioaddr, u32 chan); - void (*start_rx)(void __iomem *ioaddr, u32 chan); - void (*stop_rx)(void __iomem *ioaddr, u32 chan); - int (*dma_interrupt) (void __iomem *ioaddr, - struct stmmac_extra_stats *x, u32 chan); - /* If supported then get the optional core features */ - void (*get_hw_feature)(void __iomem *ioaddr, - struct dma_features *dma_cap); - /* Program the HW RX Watchdog */ - void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); - void (*set_tx_ring_len)(void __iomem 
*ioaddr, u32 len, u32 chan); - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); - void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); - void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); - void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); -}; - struct mac_device_info; -/* Helpers to program the MAC core */ -struct stmmac_ops { - /* MAC core initialization */ - void (*core_init)(struct mac_device_info *hw, struct net_device *dev); - /* Enable the MAC RX/TX */ - void (*set_mac)(void __iomem *ioaddr, bool enable); - /* Enable and verify that the IPC module is supported */ - int (*rx_ipc)(struct mac_device_info *hw); - /* Enable RX Queues */ - void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); - /* RX Queues Priority */ - void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); - /* TX Queues Priority */ - void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); - /* RX Queues Routing */ - void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, - u32 queue); - /* Program RX Algorithms */ - void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); - /* Program TX Algorithms */ - void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); - /* Set MTL TX queues weight */ - void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, - u32 weight, u32 queue); - /* RX MTL queue to RX dma mapping */ - void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); - /* Configure AV Algorithm */ - void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, - u32 idle_slope, u32 high_credit, u32 low_credit, - u32 queue); - /* Dump MAC registers */ - void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); - /* Handle extra events on specific interrupts hw dependent */ - int (*host_irq_status)(struct mac_device_info *hw, - struct stmmac_extra_stats *x); - /* Handle MTL interrupts */ - int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); - /* Multicast filter setting */ - void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); - /* Flow control setting */ - void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex, - unsigned int fc, unsigned int pause_time, u32 tx_cnt); - /* Set power management mode (e.g. 
magic frame) */ - void (*pmt)(struct mac_device_info *hw, unsigned long mode); - /* Set/Get Unicast MAC addresses */ - void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, - unsigned int reg_n); - void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, - unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw, - bool en_tx_lpi_clockgating); - void (*reset_eee_mode)(struct mac_device_info *hw); - void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); - void (*set_eee_pls)(struct mac_device_info *hw, int link); - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, - u32 rx_queues, u32 tx_queues); - /* PCS calls */ - void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, - bool loopback); - void (*pcs_rane)(void __iomem *ioaddr, bool restart); - void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); - /* Safety Features */ - int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); - bool (*safety_feat_irq_status)(struct net_device *ndev, - void __iomem *ioaddr, unsigned int asp, - struct stmmac_safety_stats *stats); - const char *(*safety_feat_dump)(struct stmmac_safety_stats *stats, - int index, unsigned long *count); -}; - -/* PTP and HW Timer helpers */ -struct stmmac_hwtimestamp { - void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); - u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, - int gmac4); - int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); - int (*config_addend) (void __iomem *ioaddr, u32 addend); - int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub, int gmac4); - u64(*get_systime) (void __iomem *ioaddr); -}; - extern const struct stmmac_hwtimestamp stmmac_ptp; extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; @@ -590,24 +412,13 @@ struct mii_regs { unsigned int clk_csr_mask; }; -/* Helpers to manage the descriptors for chain and ring modes */ -struct stmmac_mode_ops { - void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, - unsigned int extend_desc); - unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); - int (*set_16kib_bfsize)(int mtu); - void (*init_desc3)(struct dma_desc *p); - void (*refill_desc3) (void *priv, struct dma_desc *p); - void (*clean_desc3) (void *priv, struct dma_desc *p); -}; - struct mac_device_info { const struct stmmac_ops *mac; const struct stmmac_desc_ops *desc; const struct stmmac_dma_ops *dma; const struct stmmac_mode_ops *mode; const struct stmmac_hwtimestamp *ptp; + const struct stmmac_tc_ops *tc; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; void __iomem *pcsr; /* vpointer to device CSRs */ @@ -625,12 +436,9 @@ struct stmmac_rx_routing { u32 reg_shift; }; -struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, - int *synopsys_id); -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id); -struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, int *synopsys_id); +int dwmac100_setup(struct stmmac_priv *priv); +int dwmac1000_setup(struct stmmac_priv *priv); +int dwmac4_setup(struct stmmac_priv *priv); void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); @@ -650,24 +458,4 @@ extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; extern const struct stmmac_desc_ops 
dwmac4_desc_ops; -/** - * stmmac_get_synopsys_id - return the SYINID. - * @priv: driver private structure - * Description: this simple function is to decode and return the SYINID - * starting from the HW core register. - */ -static inline u32 stmmac_get_synopsys_id(u32 hwid) -{ - /* Check Synopsys Id (not available on old chips) */ - if (likely(hwid)) { - u32 uid = ((hwid & 0x0000ff00) >> 8); - u32 synid = (hwid & 0x000000ff); - - pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", - uid, synid); - - return synid; - } - return 0; -} #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 7cb794094a70..4ff231df7322 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -18,6 +18,7 @@ #include <linux/io.h> #include <linux/ioport.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/of_net.h> #include <linux/mfd/syscon.h> #include <linux/platform_device.h> @@ -29,6 +30,10 @@ #define PRG_ETH0_RGMII_MODE BIT(0) +#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0) +#define PRG_ETH0_EXT_RGMII_MODE 1 +#define PRG_ETH0_EXT_RMII_MODE 4 + /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ #define PRG_ETH0_CLK_M250_SEL_SHIFT 4 #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) @@ -47,12 +52,20 @@ #define MUX_CLK_NUM_PARENTS 2 +struct meson8b_dwmac; + +struct meson8b_dwmac_data { + int (*set_phy_mode)(struct meson8b_dwmac *dwmac); +}; + struct meson8b_dwmac { - struct device *dev; - void __iomem *regs; - phy_interface_t phy_mode; - struct clk *rgmii_tx_clk; - u32 tx_delay_ns; + struct device *dev; + void __iomem *regs; + + const struct meson8b_dwmac_data *data; + phy_interface_t phy_mode; + struct clk *rgmii_tx_clk; + u32 tx_delay_ns; }; struct meson8b_dwmac_clk_configs { @@ -171,6 +184,59 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) return 0; } +static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac) +{ + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* enable RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_RGMII_MODE, + PRG_ETH0_RGMII_MODE); + break; + case PHY_INTERFACE_MODE_RMII: + /* disable RGMII mode -> enables RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_RGMII_MODE, 0); + break; + default: + dev_err(dwmac->dev, "fail to set phy-mode %s\n", + phy_modes(dwmac->phy_mode)); + return -EINVAL; + } + + return 0; +} + +static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac) +{ + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* enable RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_EXT_PHY_MODE_MASK, + PRG_ETH0_EXT_RGMII_MODE); + break; + case PHY_INTERFACE_MODE_RMII: + /* disable RGMII mode -> enables RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_EXT_PHY_MODE_MASK, + PRG_ETH0_EXT_RMII_MODE); + break; + default: + dev_err(dwmac->dev, "fail to set phy-mode %s\n", + phy_modes(dwmac->phy_mode)); + return -EINVAL; + } + + return 0; +} + static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) { int ret; @@ -188,10 +254,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) case PHY_INTERFACE_MODE_RGMII_ID: case 
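
meson8b_dwmac_mask_bits(), which both set_phy_mode() callbacks above lean on, is not shown in this hunk; it is the driver's read-modify-write primitive for the PRG_ETH0 glue register and presumably looks like the following reconstruction (an assumption, not quoted from the tree):

#include <linux/io.h>
#include <linux/types.h>

/* Reconstruction: clear @mask in @reg, then set @value within it. */
static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
				    u32 mask, u32 value)
{
	u32 data = readl(dwmac->regs + reg);

	data &= ~mask;
	data |= (value & mask);

	writel(data, dwmac->regs + reg);
}
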
PHY_INTERFACE_MODE_RGMII_TXID: - /* enable RGMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, - PRG_ETH0_RGMII_MODE); - /* only relevant for RMII mode -> disable in RGMII mode */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, 0); @@ -224,10 +286,6 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) break; case PHY_INTERFACE_MODE_RMII: - /* disable RGMII mode -> enables RMII mode */ - meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, - 0); - /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_INVERTED_RMII_CLK, @@ -274,6 +332,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + dwmac->data = (const struct meson8b_dwmac_data *) + of_device_get_match_data(&pdev->dev); + if (!dwmac->data) + return -EINVAL; + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dwmac->regs)) { @@ -298,6 +361,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) if (ret) goto err_remove_config_dt; + ret = dwmac->data->set_phy_mode(dwmac); + if (ret) + goto err_remove_config_dt; + ret = meson8b_init_prg_eth(dwmac); if (ret) goto err_remove_config_dt; @@ -316,10 +383,31 @@ err_remove_config_dt: return ret; } +static const struct meson8b_dwmac_data meson8b_dwmac_data = { + .set_phy_mode = meson8b_set_phy_mode, +}; + +static const struct meson8b_dwmac_data meson_axg_dwmac_data = { + .set_phy_mode = meson_axg_set_phy_mode, +}; + static const struct of_device_id meson8b_dwmac_match[] = { - { .compatible = "amlogic,meson8b-dwmac" }, - { .compatible = "amlogic,meson8m2-dwmac" }, - { .compatible = "amlogic,meson-gxbb-dwmac" }, + { + .compatible = "amlogic,meson8b-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson8m2-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson-gxbb-dwmac", + .data = &meson8b_dwmac_data, + }, + { + .compatible = "amlogic,meson-axg-dwmac", + .data = &meson_axg_dwmac_data, + }, { } }; MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 13133b30b575..f08625a02cea 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1104,30 +1104,20 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) } else { if (bsp_priv->clk_enabled) { if (phy_iface == PHY_INTERFACE_MODE_RMII) { - if (!IS_ERR(bsp_priv->mac_clk_rx)) - clk_disable_unprepare( - bsp_priv->mac_clk_rx); + clk_disable_unprepare(bsp_priv->mac_clk_rx); - if (!IS_ERR(bsp_priv->clk_mac_ref)) - clk_disable_unprepare( - bsp_priv->clk_mac_ref); + clk_disable_unprepare(bsp_priv->clk_mac_ref); - if (!IS_ERR(bsp_priv->clk_mac_refout)) - clk_disable_unprepare( - bsp_priv->clk_mac_refout); + clk_disable_unprepare(bsp_priv->clk_mac_refout); } - if (!IS_ERR(bsp_priv->clk_phy)) - clk_disable_unprepare(bsp_priv->clk_phy); + clk_disable_unprepare(bsp_priv->clk_phy); - if (!IS_ERR(bsp_priv->aclk_mac)) - clk_disable_unprepare(bsp_priv->aclk_mac); + clk_disable_unprepare(bsp_priv->aclk_mac); - if (!IS_ERR(bsp_priv->pclk_mac)) - clk_disable_unprepare(bsp_priv->pclk_mac); + clk_disable_unprepare(bsp_priv->pclk_mac); - if (!IS_ERR(bsp_priv->mac_clk_tx)) - clk_disable_unprepare(bsp_priv->mac_clk_tx); + clk_disable_unprepare(bsp_priv->mac_clk_tx); /** * if (!IS_ERR(bsp_priv->clk_mac)) 
* clk_disable_unprepare(bsp_priv->clk_mac); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 9e6db16af663..7e2e79dedebf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -16,49 +16,180 @@ #include <linux/of_net.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/stmmac.h> #include "stmmac_platform.h" -#define MII_PHY_SEL_MASK BIT(23) +#define SYSCFG_MCU_ETH_MASK BIT(23) +#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16) + +#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16) +#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17) +#define SYSCFG_PMCR_ETH_SEL_MII BIT(20) +#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) +#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) +#define SYSCFG_PMCR_ETH_SEL_GMII 0 +#define SYSCFG_MCU_ETH_SEL_MII 0 +#define SYSCFG_MCU_ETH_SEL_RMII 1 struct stm32_dwmac { struct clk *clk_tx; struct clk *clk_rx; + struct clk *clk_eth_ck; + struct clk *clk_ethstp; + struct clk *syscfg_clk; + bool int_phyclk; /* Clock from RCC to drive PHY */ u32 mode_reg; /* MAC glue-logic mode register */ struct regmap *regmap; u32 speed; + const struct stm32_ops *ops; + struct device *dev; +}; + +struct stm32_ops { + int (*set_mode)(struct plat_stmmacenet_data *plat_dat); + int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare); + int (*suspend)(struct stm32_dwmac *dwmac); + void (*resume)(struct stm32_dwmac *dwmac); + int (*parse_data)(struct stm32_dwmac *dwmac, + struct device *dev); + u32 syscfg_eth_mask; }; static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - u32 reg = dwmac->mode_reg; - u32 val; int ret; - val = (plat_dat->interface == PHY_INTERFACE_MODE_MII) ? 
0 : 1; - ret = regmap_update_bits(dwmac->regmap, reg, MII_PHY_SEL_MASK, val); - if (ret) - return ret; + if (dwmac->ops->set_mode) { + ret = dwmac->ops->set_mode(plat_dat); + if (ret) + return ret; + } ret = clk_prepare_enable(dwmac->clk_tx); if (ret) return ret; - ret = clk_prepare_enable(dwmac->clk_rx); - if (ret) - clk_disable_unprepare(dwmac->clk_tx); + if (!dwmac->dev->power.is_suspended) { + ret = clk_prepare_enable(dwmac->clk_rx); + if (ret) { + clk_disable_unprepare(dwmac->clk_tx); + return ret; + } + } + + if (dwmac->ops->clk_prepare) { + ret = dwmac->ops->clk_prepare(dwmac, true); + if (ret) { + clk_disable_unprepare(dwmac->clk_rx); + clk_disable_unprepare(dwmac->clk_tx); + } + } return ret; } +static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) +{ + int ret = 0; + + if (prepare) { + ret = clk_prepare_enable(dwmac->syscfg_clk); + if (ret) + return ret; + + if (dwmac->int_phyclk) { + ret = clk_prepare_enable(dwmac->clk_eth_ck); + if (ret) { + clk_disable_unprepare(dwmac->syscfg_clk); + return ret; + } + } + } else { + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + } + return ret; +} + +static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_PMCR_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_GMII: + val = SYSCFG_PMCR_ETH_SEL_GMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_PMCR_ETH_SEL_RMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + case PHY_INTERFACE_MODE_RGMII: + val = SYSCFG_PMCR_ETH_SEL_RGMII; + if (dwmac->int_phyclk) + val |= SYSCFG_PMCR_ETH_CLK_SEL; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + +static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val; + + switch (plat_dat->interface) { + case PHY_INTERFACE_MODE_MII: + val = SYSCFG_MCU_ETH_SEL_MII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_MCU_ETH_SEL_RMII; + pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); + break; + default: + pr_debug("SYSCFG init : Do not manage %d interface\n", + plat_dat->interface); + /* Do not manage others interfaces */ + return -EINVAL; + } + + return regmap_update_bits(dwmac->regmap, reg, + dwmac->ops->syscfg_eth_mask, val); +} + static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) { clk_disable_unprepare(dwmac->clk_tx); clk_disable_unprepare(dwmac->clk_rx); + + if (dwmac->ops->clk_prepare) + dwmac->ops->clk_prepare(dwmac, false); } static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, @@ -70,15 +201,22 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, /* Get TX/RX clocks */ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx"); if (IS_ERR(dwmac->clk_tx)) { - dev_err(dev, "No tx clock provided...\n"); + 
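
Both set_mode() callbacks above resolve the PHY interface to a bit pattern and end in one regmap_update_bits() call, with the field mask supplied per variant through stm32_ops (SYSCFG_MCU_ETH_MASK is BIT(23) on the MCU parts, SYSCFG_MP1_ETH_MASK is GENMASK(23, 16) on MP1). regmap_update_bits() performs the read-modify-write and skips the bus write when nothing changes; a minimal standalone usage, on a hypothetical register layout:

#include <linux/bitops.h>
#include <linux/regmap.h>

#define DEMO_PMCR	0x04			/* hypothetical offset */
#define DEMO_ETH_MASK	GENMASK(23, 16)		/* whole mode field */
#define DEMO_SEL_RMII	BIT(23)

/* new = (old & ~DEMO_ETH_MASK) | (DEMO_SEL_RMII & DEMO_ETH_MASK) */
static int demo_select_rmii(struct regmap *syscfg)
{
	return regmap_update_bits(syscfg, DEMO_PMCR,
				  DEMO_ETH_MASK, DEMO_SEL_RMII);
}
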
dev_err(dev, "No ETH Tx clock provided...\n"); return PTR_ERR(dwmac->clk_tx); } + dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx"); if (IS_ERR(dwmac->clk_rx)) { - dev_err(dev, "No rx clock provided...\n"); + dev_err(dev, "No ETH Rx clock provided...\n"); return PTR_ERR(dwmac->clk_rx); } + if (dwmac->ops->parse_data) { + err = dwmac->ops->parse_data(dwmac, dev); + if (err) + return err; + } + /* Get mode register */ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon"); if (IS_ERR(dwmac->regmap)) @@ -91,11 +229,46 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, return err; } +static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, + struct device *dev) +{ + struct device_node *np = dev->of_node; + + dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk"); + + /* Check if internal clk from RCC selected */ + if (dwmac->int_phyclk) { + /* Get ETH_CLK clocks */ + dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck"); + if (IS_ERR(dwmac->clk_eth_ck)) { + dev_err(dev, "No ETH CK clock provided...\n"); + return PTR_ERR(dwmac->clk_eth_ck); + } + } + + /* Clock used for low power mode */ + dwmac->clk_ethstp = devm_clk_get(dev, "ethstp"); + if (IS_ERR(dwmac->clk_ethstp)) { + dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n"); + return PTR_ERR(dwmac->clk_ethstp); + } + + /* Clock for sysconfig */ + dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk"); + if (IS_ERR(dwmac->syscfg_clk)) { + dev_err(dev, "No syscfg clock provided...\n"); + return PTR_ERR(dwmac->syscfg_clk); + } + + return 0; +} + static int stm32_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; struct stmmac_resources stmmac_res; struct stm32_dwmac *dwmac; + const struct stm32_ops *data; int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); @@ -112,6 +285,16 @@ static int stm32_dwmac_probe(struct platform_device *pdev) goto err_remove_config_dt; } + data = of_device_get_match_data(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, "no of match data provided\n"); + ret = -EINVAL; + goto err_remove_config_dt; + } + + dwmac->ops = data; + dwmac->dev = &pdev->dev; + ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); @@ -149,15 +332,48 @@ static int stm32_dwmac_remove(struct platform_device *pdev) return ret; } +static int stm32mp1_suspend(struct stm32_dwmac *dwmac) +{ + int ret = 0; + + ret = clk_prepare_enable(dwmac->clk_ethstp); + if (ret) + return ret; + + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->syscfg_clk); + if (dwmac->int_phyclk) + clk_disable_unprepare(dwmac->clk_eth_ck); + + return ret; +} + +static void stm32mp1_resume(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_ethstp); +} + +static int stm32mcu_suspend(struct stm32_dwmac *dwmac) +{ + clk_disable_unprepare(dwmac->clk_tx); + clk_disable_unprepare(dwmac->clk_rx); + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int stm32_dwmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; + int ret; ret = stmmac_suspend(dev); - stm32_dwmac_clk_disable(priv->plat->bsp_priv); + + if (dwmac->ops->suspend) + ret = dwmac->ops->suspend(dwmac); return ret; } @@ -166,8 +382,12 @@ static int stm32_dwmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; int ret; + 
if (dwmac->ops->resume) + dwmac->ops->resume(dwmac); + ret = stm32_dwmac_init(priv->plat); if (ret) return ret; @@ -181,8 +401,24 @@ static int stm32_dwmac_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume); +static struct stm32_ops stm32mcu_dwmac_data = { + .set_mode = stm32mcu_set_mode, + .suspend = stm32mcu_suspend, + .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK +}; + +static struct stm32_ops stm32mp1_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .clk_prepare = stm32mp1_clk_prepare, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK +}; + static const struct of_device_id stm32_dwmac_match[] = { - { .compatible = "st,stm32-dwmac"}, + { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data}, + { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data}, { } }; MODULE_DEVICE_TABLE(of, stm32_dwmac_match); @@ -199,5 +435,6 @@ static struct platform_driver stm32_dwmac_driver = { module_platform_driver(stm32_dwmac_driver); MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>"); -MODULE_DESCRIPTION("STMicroelectronics MCU DWMAC Specific Glue layer"); +MODULE_AUTHOR("Christophe Roullier <christophe.roullier@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 DWMAC Specific Glue layer"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index a3fa65b1ca8e..2e6e2a96b4f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -42,17 +42,27 @@ * This value is used for disabling properly EMAC * and used as a good starting value in case of the * boot process(uboot) leave some stuff. + * @syscon_field reg_field for the syscon's gmac register * @soc_has_internal_phy: Does the MAC embed an internal PHY * @support_mii: Does the MAC handle MII * @support_rmii: Does the MAC handle RMII * @support_rgmii: Does the MAC handle RGMII + * + * @rx_delay_max: Maximum raw value for RX delay chain + * @tx_delay_max: Maximum raw value for TX delay chain + * These two also indicate the bitmask for + * the RX and TX delay chain registers. A + * value of zero indicates this is not supported. 
*/ struct emac_variant { u32 default_syscon_value; + const struct reg_field *syscon_field; bool soc_has_internal_phy; bool support_mii; bool support_rmii; bool support_rgmii; + u8 rx_delay_max; + u8 tx_delay_max; }; /* struct sunxi_priv_data - hold all sunxi private data @@ -71,38 +81,70 @@ struct sunxi_priv_data { struct regulator *regulator; struct reset_control *rst_ephy; const struct emac_variant *variant; - struct regmap *regmap; + struct regmap_field *regmap_field; bool internal_phy_powered; void *mux_handle; }; +/* EMAC clock register @ 0x30 in the "system control" address range */ +static const struct reg_field sun8i_syscon_reg_field = { + .reg = 0x30, + .lsb = 0, + .msb = 31, +}; + +/* EMAC clock register @ 0x164 in the CCU address range */ +static const struct reg_field sun8i_ccu_reg_field = { + .reg = 0x164, + .lsb = 0, + .msb = 31, +}; + static const struct emac_variant emac_variant_h3 = { .default_syscon_value = 0x58000, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = true, .support_mii = true, .support_rmii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, }; static const struct emac_variant emac_variant_v3s = { .default_syscon_value = 0x38000, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = true, .support_mii = true }; static const struct emac_variant emac_variant_a83t = { .default_syscon_value = 0, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = false, .support_mii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, +}; + +static const struct emac_variant emac_variant_r40 = { + .default_syscon_value = 0, + .syscon_field = &sun8i_ccu_reg_field, + .support_mii = true, + .support_rgmii = true, + .rx_delay_max = 7, }; static const struct emac_variant emac_variant_a64 = { .default_syscon_value = 0, + .syscon_field = &sun8i_syscon_reg_field, .soc_has_internal_phy = false, .support_mii = true, .support_rmii = true, - .support_rgmii = true + .support_rgmii = true, + .rx_delay_max = 31, + .tx_delay_max = 7, }; #define EMAC_BASIC_CTL0 0x00 @@ -206,9 +248,7 @@ static const struct emac_variant emac_variant_a64 = { #define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */ /* Generic system control EMAC_CLK bits */ -#define SYSCON_ETXDC_MASK GENMASK(2, 0) #define SYSCON_ETXDC_SHIFT 10 -#define SYSCON_ERXDC_MASK GENMASK(4, 0) #define SYSCON_ERXDC_SHIFT 5 /* EMAC PHY Interface Type */ #define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */ @@ -216,7 +256,6 @@ static const struct emac_variant emac_variant_a64 = { #define SYSCON_ETCS_MII 0x0 #define SYSCON_ETCS_EXT_GMII 0x1 #define SYSCON_ETCS_INT_GMII 0x2 -#define SYSCON_EMAC_REG 0x30 /* sun8i_dwmac_dma_reset() - reset the EMAC * Called from stmmac via stmmac_dma_ops->reset @@ -237,17 +276,28 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) * Called from stmmac via stmmac_dma_ops->init */ static void sun8i_dwmac_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { - /* Write TX and RX descriptors address */ - writel(dma_rx, ioaddr + EMAC_RX_DESC_LIST); - writel(dma_tx, ioaddr + EMAC_TX_DESC_LIST); - writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); } +static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* Write RX descriptors address */ + writel(dma_rx_phy, 
ioaddr + EMAC_RX_DESC_LIST); +} + +static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* Write TX descriptors address */ + writel(dma_tx_phy, ioaddr + EMAC_TX_DESC_LIST); +} + /* sun8i_dwmac_dump_regs() - Dump EMAC address space * Called from stmmac_dma_ops->dump_regs * Used for ethtool @@ -398,13 +448,36 @@ static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr, return ret; } -static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 v; + + v = readl(ioaddr + EMAC_RX_CTL1); + if (mode == SF_DMA_MODE) { + v |= EMAC_RX_MD; + } else { + v &= ~EMAC_RX_MD; + v &= ~EMAC_RX_TH_MASK; + if (mode < 32) + v |= EMAC_RX_TH_32; + else if (mode < 64) + v |= EMAC_RX_TH_64; + else if (mode < 96) + v |= EMAC_RX_TH_96; + else if (mode < 128) + v |= EMAC_RX_TH_128; + } + writel(v, ioaddr + EMAC_RX_CTL1); +} + +static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { u32 v; v = readl(ioaddr + EMAC_TX_CTL1); - if (txmode == SF_DMA_MODE) { + if (mode == SF_DMA_MODE) { v |= EMAC_TX_MD; /* Undocumented bit (called TX_NEXT_FRM in BSP), the original * comment is @@ -415,40 +488,26 @@ static void sun8i_dwmac_dma_operation_mode(void __iomem *ioaddr, int txmode, } else { v &= ~EMAC_TX_MD; v &= ~EMAC_TX_TH_MASK; - if (txmode < 64) + if (mode < 64) v |= EMAC_TX_TH_64; - else if (txmode < 128) + else if (mode < 128) v |= EMAC_TX_TH_128; - else if (txmode < 192) + else if (mode < 192) v |= EMAC_TX_TH_192; - else if (txmode < 256) + else if (mode < 256) v |= EMAC_TX_TH_256; } writel(v, ioaddr + EMAC_TX_CTL1); - - v = readl(ioaddr + EMAC_RX_CTL1); - if (rxmode == SF_DMA_MODE) { - v |= EMAC_RX_MD; - } else { - v &= ~EMAC_RX_MD; - v &= ~EMAC_RX_TH_MASK; - if (rxmode < 32) - v |= EMAC_RX_TH_32; - else if (rxmode < 64) - v |= EMAC_RX_TH_64; - else if (rxmode < 96) - v |= EMAC_RX_TH_96; - else if (rxmode < 128) - v |= EMAC_RX_TH_128; - } - writel(v, ioaddr + EMAC_RX_CTL1); } static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { .reset = sun8i_dwmac_dma_reset, .init = sun8i_dwmac_dma_init, + .init_rx_chan = sun8i_dwmac_dma_init_rx, + .init_tx_chan = sun8i_dwmac_dma_init_tx, .dump_regs = sun8i_dwmac_dump_regs, - .dma_mode = sun8i_dwmac_dma_operation_mode, + .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx, + .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx, .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission, .enable_dma_irq = sun8i_dwmac_enable_dma_irq, .disable_dma_irq = sun8i_dwmac_disable_dma_irq, @@ -745,7 +804,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, bool need_power_ephy = false; if (current_child ^ desired_child) { - regmap_read(gmac->regmap, SYSCON_EMAC_REG, ®); + regmap_field_read(gmac->regmap_field, ®); switch (desired_child) { case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: dev_info(priv->device, "Switch mux to internal PHY"); @@ -763,7 +822,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, desired_child); return -EINVAL; } - regmap_write(gmac->regmap, SYSCON_EMAC_REG, val); + regmap_field_write(gmac->regmap_field, val); if (need_power_ephy) { ret = sun8i_dwmac_power_internal_phy(priv); if (ret) @@ -801,7 +860,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) int ret; u32 reg, val; - regmap_read(gmac->regmap, SYSCON_EMAC_REG, 
&val); + regmap_field_read(gmac->regmap_field, &val); reg = gmac->variant->default_syscon_value; if (reg != val) dev_warn(priv->device, @@ -835,8 +894,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) } val /= 100; dev_dbg(priv->device, "set tx-delay to %x\n", val); - if (val <= SYSCON_ETXDC_MASK) { - reg &= ~(SYSCON_ETXDC_MASK << SYSCON_ETXDC_SHIFT); + if (val <= gmac->variant->tx_delay_max) { + reg &= ~(gmac->variant->tx_delay_max << + SYSCON_ETXDC_SHIFT); reg |= (val << SYSCON_ETXDC_SHIFT); } else { dev_err(priv->device, "Invalid TX clock delay: %d\n", @@ -852,8 +912,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) } val /= 100; dev_dbg(priv->device, "set rx-delay to %x\n", val); - if (val <= SYSCON_ERXDC_MASK) { - reg &= ~(SYSCON_ERXDC_MASK << SYSCON_ERXDC_SHIFT); + if (val <= gmac->variant->rx_delay_max) { + reg &= ~(gmac->variant->rx_delay_max << + SYSCON_ERXDC_SHIFT); reg |= (val << SYSCON_ERXDC_SHIFT); } else { dev_err(priv->device, "Invalid RX clock delay: %d\n", @@ -883,7 +944,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) return -EINVAL; } - regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); + regmap_field_write(gmac->regmap_field, reg); return 0; } @@ -892,7 +953,7 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac) { u32 reg = gmac->variant->default_syscon_value; - regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); + regmap_field_write(gmac->regmap_field, reg); } static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) @@ -971,6 +1032,34 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) return mac; } +static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node) +{ + struct device_node *syscon_node; + struct platform_device *syscon_pdev; + struct regmap *regmap = NULL; + + syscon_node = of_parse_phandle(node, "syscon", 0); + if (!syscon_node) + return ERR_PTR(-ENODEV); + + syscon_pdev = of_find_device_by_node(syscon_node); + if (!syscon_pdev) { + /* platform device might not be probed yet */ + regmap = ERR_PTR(-EPROBE_DEFER); + goto out_put_node; + } + + /* If no regmap is found then the other device driver is at fault */ + regmap = dev_get_regmap(&syscon_pdev->dev, NULL); + if (!regmap) + regmap = ERR_PTR(-EINVAL); + + platform_device_put(syscon_pdev); +out_put_node: + of_node_put(syscon_node); + return regmap; +} + static int sun8i_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -980,6 +1069,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) int ret; struct stmmac_priv *priv; struct net_device *ndev; + struct regmap *regmap; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -1014,14 +1104,41 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) gmac->regulator = NULL; } - gmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, - "syscon"); - if (IS_ERR(gmac->regmap)) { - ret = PTR_ERR(gmac->regmap); + /* The "GMAC clock control" register might be located in the + * CCU address range (on the R40), or the system control address + * range (on most other sun8i and later SoCs). + * + * The former controls most if not all clocks in the SoC. The + * latter has an SoC identification register, and on some SoCs, + * controls to map device specific SRAM to either the intended + * peripheral, or the CPU address space. + * + * In either case, there should be a coordinated and restricted + * method of accessing the register needed here. 
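
The tx/rx delay programming above works in steps of 100 ps and is bounded by the per-variant maxima; using rx_delay_max/tx_delay_max directly as the mask is valid only because the maxima are of the form 2^n - 1 (31 and 7 here). A worked sketch of the RX side (example_rx_delay() is a made-up helper; on the R40, rx_delay_max = 7, so a DT allwinner,rx-delay-ps of 700 programs the largest accepted value):

static int example_rx_delay(u32 delay_ps, u8 rx_delay_max, u32 *reg)
{
	u32 val = delay_ps / 100;	/* register granularity is 100 ps */

	if (val > rx_delay_max)
		return -EINVAL;		/* the "Invalid RX clock delay" case */

	*reg &= ~((u32)rx_delay_max << SYSCON_ERXDC_SHIFT);
	*reg |= val << SYSCON_ERXDC_SHIFT;
	return 0;
}
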
This is done by + * having the device export a custom regmap, instead of a generic + * syscon, which grants all access to all registers. + * + * To support old device trees, we fall back to using the syscon + * interface if possible. + */ + regmap = sun8i_dwmac_get_syscon_from_dev(pdev->dev.of_node); + if (IS_ERR(regmap)) + regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "syscon"); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret); return ret; } + gmac->regmap_field = devm_regmap_field_alloc(dev, regmap, + *gmac->variant->syscon_field); + if (IS_ERR(gmac->regmap_field)) { + ret = PTR_ERR(gmac->regmap_field); + dev_err(dev, "Unable to map syscon register: %d\n", ret); + return ret; + } + plat_dat->interface = of_get_phy_mode(dev->of_node); /* platform data specifying hardware features and callbacks. @@ -1078,6 +1195,8 @@ static const struct of_device_id sun8i_dwmac_match[] = { .data = &emac_variant_v3s }, { .compatible = "allwinner,sun8i-a83t-emac", .data = &emac_variant_a83t }, + { .compatible = "allwinner,sun8i-r40-gmac", + .data = &emac_variant_r40 }, { .compatible = "allwinner,sun50i-a64-emac", .data = &emac_variant_a64 }, { } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index c02d36629c52..184ca13c8f79 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -29,7 +29,6 @@ #define GMAC_MII_DATA 0x00000014 /* MII Data */ #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ -#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ #define GMAC_DEBUG 0x00000024 /* GMAC debug register */ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index ef10baf14186..0877bde6e860 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -27,6 +27,7 @@ #include <linux/ethtool.h> #include <net/dsa.h> #include <asm/io.h> +#include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac1000.h" @@ -498,7 +499,7 @@ static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, x->mac_gmii_rx_proto_engine++; } -static const struct stmmac_ops dwmac1000_ops = { +const struct stmmac_ops dwmac1000_ops = { .core_init = dwmac1000_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac1000_rx_ipc_enable, @@ -519,28 +520,21 @@ static const struct stmmac_ops dwmac1000_ops = { .pcs_get_adv_lp = dwmac1000_get_adv_lp, }; -struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, - int *synopsys_id) +int dwmac1000_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; - u32 hwid = readl(ioaddr + GMAC_VERSION); + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; + dev_info(priv->device, "\tDWMAC1000\n"); - mac->pcsr = ioaddr; - mac->multicast_filter_bins = mcbins; - mac->unicast_filter_entries = perfect_uc_entries; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; mac->mcast_bits_log2 = 0; if (mac->multicast_filter_bins) mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); - 
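
With this refactor a setup() callback no longer allocates the mac_device_info or reads GMAC_VERSION itself; it only fills in the structure that stmmac_hwif_init() (added in hwif.c further below) pre-allocated into priv->hw, while ops tables and Synopsys ID handling move to the core. The new contract, sketched as a hypothetical minimal core:

static int example_core_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;	/* pre-allocated by the core */

	dev_info(priv->device, "\tEXAMPLE core\n");
	mac->pcsr = priv->ioaddr;
	/* mac->mac, mac->dma and mac->desc now come from the hwif table */
	return 0;
}
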
mac->mac = &dwmac1000_ops; - mac->dma = &dwmac1000_dma_ops; - mac->link.duplex = GMAC_CONTROL_DM; mac->link.speed10 = GMAC_CONTROL_PS; mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; @@ -555,8 +549,5 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, mac->mii.clk_csr_shift = 2; mac->mii.clk_csr_mask = GENMASK(5, 2); - /* Get and dump the chip ID */ - *synopsys_id = stmmac_get_synopsys_id(hwid); - - return mac; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 7ecf549c7f1c..aacc4aa80e3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -81,8 +81,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) } static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; @@ -119,12 +118,22 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} - /* RX/TX descriptor base address lists must be written into - * DMA CSR3 and CSR4, respectively - */ - writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); - writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); +static void dwmac1000_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* RX descriptor base address list must be written into DMA CSR3 */ + writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac1000_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* TX descriptor base address list must be written into DMA CSR4 */ + writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR); } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -148,12 +157,40 @@ static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) return csr6; } -static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (mode == SF_DMA_MODE) { + pr_debug("GMAC: enable RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (mode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (mode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (mode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + /* Configure flow control based on rx fifo size */ + csr6 = dwmac1000_configure_fc(csr6, fifosz); + + writel(csr6, ioaddr + DMA_CONTROL); +} + +static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); - if (txmode == SF_DMA_MODE) { + if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); /* Transmit COE type 2 cannot be done in cut-through mode. 
*/ csr6 |= DMA_CONTROL_TSF; @@ -162,42 +199,22 @@ static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode, */ csr6 |= DMA_CONTROL_OSF; } else { - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode); + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode); csr6 &= ~DMA_CONTROL_TSF; csr6 &= DMA_CONTROL_TC_TX_MASK; /* Set the transmit threshold */ - if (txmode <= 32) + if (mode <= 32) csr6 |= DMA_CONTROL_TTC_32; - else if (txmode <= 64) + else if (mode <= 64) csr6 |= DMA_CONTROL_TTC_64; - else if (txmode <= 128) + else if (mode <= 128) csr6 |= DMA_CONTROL_TTC_128; - else if (txmode <= 192) + else if (mode <= 192) csr6 |= DMA_CONTROL_TTC_192; else csr6 |= DMA_CONTROL_TTC_256; } - if (rxmode == SF_DMA_MODE) { - pr_debug("GMAC: enable RX store and forward mode\n"); - csr6 |= DMA_CONTROL_RSF; - } else { - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode); - csr6 &= ~DMA_CONTROL_RSF; - csr6 &= DMA_CONTROL_TC_RX_MASK; - if (rxmode <= 32) - csr6 |= DMA_CONTROL_RTC_32; - else if (rxmode <= 64) - csr6 |= DMA_CONTROL_RTC_64; - else if (rxmode <= 96) - csr6 |= DMA_CONTROL_RTC_96; - else - csr6 |= DMA_CONTROL_RTC_128; - } - - /* Configure flow control based on rx fifo size */ - csr6 = dwmac1000_configure_fc(csr6, rxfifosz); - writel(csr6, ioaddr + DMA_CONTROL); } @@ -256,9 +273,12 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt, const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac1000_dma_init, + .init_rx_chan = dwmac1000_dma_init_rx, + .init_tx_chan = dwmac1000_dma_init_tx, .axi = dwmac1000_dma_axi, .dump_regs = dwmac1000_dump_dma_regs, - .dma_mode = dwmac1000_dma_operation_mode, + .dma_rx_mode = dwmac1000_dma_operation_mode_rx, + .dma_tx_mode = dwmac1000_dma_operation_mode_tx, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, .disable_dma_irq = dwmac_disable_dma_irq, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 91b23f9db31a..b735143987e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -27,6 +27,7 @@ #include <linux/crc32.h> #include <net/dsa.h> #include <asm/io.h> +#include "stmmac.h" #include "dwmac100.h" static void dwmac100_core_init(struct mac_device_info *hw, @@ -159,7 +160,7 @@ static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode) return; } -static const struct stmmac_ops dwmac100_ops = { +const struct stmmac_ops dwmac100_ops = { .core_init = dwmac100_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac100_rx_ipc_enable, @@ -172,20 +173,13 @@ static const struct stmmac_ops dwmac100_ops = { .get_umac_addr = dwmac100_get_umac_addr, }; -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) +int dwmac100_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; - - pr_info("\tDWMAC100\n"); - - mac->pcsr = ioaddr; - mac->mac = &dwmac100_ops; - mac->dma = &dwmac100_dma_ops; + dev_info(priv->device, "\tDWMAC100\n"); + mac->pcsr = priv->ioaddr; mac->link.duplex = MAC_CONTROL_F; mac->link.speed10 = 0; mac->link.speed100 = 0; @@ -200,8 +194,5 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) mac->mii.clk_csr_shift = 2; mac->mii.clk_csr_mask = GENMASK(5, 2); - /* Synopsys Id is not available on 
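
Splitting dma_mode into dma_rx_mode/dma_tx_mode lets the core program RX and TX queues independently, with their own thresholds, fifo sizes and queue modes. The expected calling pattern, sketched with the hwif.h wrappers introduced below (example_set_dma_modes() is made up; the queue counts and MTL_QUEUE_DCB are taken from elsewhere in stmmac):

static void example_set_dma_modes(struct stmmac_priv *priv, int rxmode,
				  int txmode, int rxfifosz, int txfifosz)
{
	u32 chan;

	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, MTL_QUEUE_DCB);
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, MTL_QUEUE_DCB);
}
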
old chips */ - *synopsys_id = 0; - - return mac; + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 6502b9aa3bf5..21dee25ee570 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -29,8 +29,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), @@ -38,12 +37,22 @@ static void dwmac100_dma_init(void __iomem *ioaddr, /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); +} - /* RX/TX descriptor base addr lists must be written into - * DMA CSR3 and CSR4, respectively - */ - writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); - writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); +static void dwmac100_dma_init_rx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan) +{ + /* RX descriptor base addr lists must be written into DMA CSR3 */ + writel(dma_rx_phy, ioaddr + DMA_RCV_BASE_ADDR); +} + +static void dwmac100_dma_init_tx(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan) +{ + /* TX descriptor base addr lists must be written into DMA CSR4 */ + writel(dma_tx_phy, ioaddr + DMA_TX_BASE_ADDR); } /* Store and Forward capability is not used at all. @@ -51,14 +60,14 @@ static void dwmac100_dma_init(void __iomem *ioaddr, * The transmit threshold can be programmed by setting the TTC bits in the DMA * control register. */ -static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode, - int rxmode, int rxfifosz) +static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode, + u32 channel, int fifosz, u8 qmode) { u32 csr6 = readl(ioaddr + DMA_CONTROL); - if (txmode <= 32) + if (mode <= 32) csr6 |= DMA_CONTROL_TTC_32; - else if (txmode <= 64) + else if (mode <= 64) csr6 |= DMA_CONTROL_TTC_64; else csr6 |= DMA_CONTROL_TTC_128; @@ -112,8 +121,10 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, const struct stmmac_dma_ops dwmac100_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac100_dma_init, + .init_rx_chan = dwmac100_dma_init_rx, + .init_tx_chan = dwmac100_dma_init_tx, .dump_regs = dwmac100_dump_dma_regs, - .dma_mode = dwmac100_dma_operation_mode, + .dma_tx_mode = dwmac100_dma_operation_mode_tx, .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr, .enable_dma_transmission = dwmac_enable_dma_transmission, .enable_dma_irq = dwmac_enable_dma_irq, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index dedd40613090..eb013d54025a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -34,7 +34,6 @@ #define GMAC_PCS_BASE 0x000000e0 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 -#define GMAC_VERSION 0x00000110 #define GMAC_DEBUG 0x00000114 #define GMAC_HW_FEATURE0 0x0000011c #define GMAC_HW_FEATURE1 0x00000120 @@ -188,6 +187,7 @@ enum power_event { #define GMAC_HW_RXFIFOSIZE GENMASK(4, 0) /* MAC HW features2 bitmap */ +#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24) #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18) #define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12) #define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6) @@ -195,6 +195,9 @@ enum 
power_event { /* MAC HW features3 bitmap */ #define GMAC_HW_FEAT_ASP GENMASK(29, 28) +#define GMAC_HW_FEAT_FRPES GENMASK(14, 13) +#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11) +#define GMAC_HW_FEAT_FRPSEL BIT(10) /* MAC HW ADDR regs */ #define GMAC_HI_DCS GENMASK(18, 16) @@ -203,6 +206,7 @@ enum power_event { /* MTL registers */ #define MTL_OPERATION_MODE 0x00000c00 +#define MTL_FRPE BIT(15) #define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5) #define MTL_OPERATION_SCHALG_WRR (0x0 << 5) #define MTL_OPERATION_SCHALG_WFQ (0x1 << 5) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 517b1f6736a8..7e5d5db0d516 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -18,6 +18,7 @@ #include <linux/ethtool.h> #include <linux/io.h> #include <net/dsa.h> +#include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac4.h" #include "dwmac5.h" @@ -700,7 +701,7 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x, x->mac_gmii_rx_proto_engine++; } -static const struct stmmac_ops dwmac4_ops = { +const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -731,7 +732,7 @@ static const struct stmmac_ops dwmac4_ops = { .set_filter = dwmac4_set_filter, }; -static const struct stmmac_ops dwmac410_ops = { +const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -762,7 +763,7 @@ static const struct stmmac_ops dwmac410_ops = { .set_filter = dwmac4_set_filter, }; -static const struct stmmac_ops dwmac510_ops = { +const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, @@ -794,21 +795,20 @@ static const struct stmmac_ops dwmac510_ops = { .safety_feat_config = dwmac5_safety_feat_config, .safety_feat_irq_status = dwmac5_safety_feat_irq_status, .safety_feat_dump = dwmac5_safety_feat_dump, + .rxp_config = dwmac5_rxp_config, + .flex_pps_config = dwmac5_flex_pps_config, }; -struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries, int *synopsys_id) +int dwmac4_setup(struct stmmac_priv *priv) { - struct mac_device_info *mac; - u32 hwid = readl(ioaddr + GMAC_VERSION); + struct mac_device_info *mac = priv->hw; - mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); - if (!mac) - return NULL; + dev_info(priv->device, "\tDWMAC4/5\n"); - mac->pcsr = ioaddr; - mac->multicast_filter_bins = mcbins; - mac->unicast_filter_entries = perfect_uc_entries; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; mac->mcast_bits_log2 = 0; if (mac->multicast_filter_bins) @@ -828,20 +828,5 @@ struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, mac->mii.clk_csr_shift = 8; mac->mii.clk_csr_mask = GENMASK(11, 8); - /* Get and dump the chip ID */ - *synopsys_id = stmmac_get_synopsys_id(hwid); - - if (*synopsys_id > DWMAC_CORE_4_00) - mac->dma = &dwmac410_dma_ops; - else - mac->dma = &dwmac4_dma_ops; - - if (*synopsys_id >= DWMAC_CORE_5_10) - mac->mac = &dwmac510_ops; - else if (*synopsys_id >= DWMAC_CORE_4_00) - mac->mac = &dwmac410_ops; - else - mac->mac = &dwmac4_ops; - - return mac; + return 0; } diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 2a6521d33e43..20299f6f65fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -189,9 +189,12 @@ static void dwmac4_set_tx_owner(struct dma_desc *p) p->des3 |= cpu_to_le32(TDES3_OWN); } -static void dwmac4_set_rx_owner(struct dma_desc *p) +static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { - p->des3 |= cpu_to_le32(RDES3_OWN); + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); + + if (!disable_rx_ic) + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); } static int dwmac4_get_tx_ls(struct dma_desc *p) @@ -223,7 +226,7 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) return 0; } -static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) +static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns; @@ -232,7 +235,7 @@ static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) /* convert high/sec time stamp value to nanosecond */ ns += le32_to_cpu(p->des1) * 1000000000ULL; - return ns; + *ts = ns; } static int dwmac4_rx_check_timestamp(void *desc) @@ -292,10 +295,7 @@ exit: static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR); - - if (!disable_rx_ic) - p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN); + dwmac4_set_rx_owner(p, disable_rx_ic); } static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end) @@ -424,6 +424,25 @@ static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV); } +static void dwmac4_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des0); +} + +static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des0 = cpu_to_le32(addr); + p->des1 = 0; +} + +static void dwmac4_clear(struct dma_desc *p) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; +} + const struct stmmac_desc_ops dwmac4_desc_ops = { .tx_status = dwmac4_wrback_get_tx_status, .rx_status = dwmac4_wrback_get_rx_status, @@ -445,6 +464,9 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .init_tx_desc = dwmac4_rd_init_tx_desc, .display_ring = dwmac4_display_ring, .set_mss = dwmac4_set_mss_ctxt, + .get_addr = dwmac4_get_addr, + .set_addr = dwmac4_set_addr, + .clear = dwmac4_clear, }; const struct stmmac_mode_ops dwmac4_ring_mode_ops = { }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index d37d457306d1..d37f17ca62fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -94,6 +94,10 @@ static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr, value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)); value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT); + + /* Enable OSP to get best performance */ + value |= DMA_CONTROL_OSP; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan)); writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); @@ -116,8 +120,7 @@ static void dwmac4_dma_init_channel(void __iomem *ioaddr, } static void dwmac4_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, - u32 dma_tx, u32 dma_rx, int atds) + struct stmmac_dma_cfg *dma_cfg, int atds) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); @@ -370,6 +373,8 @@ static void 
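
The new get_addr/set_addr/clear descriptor ops hide the des0-versus-des2 layout difference between GMAC4 and the older cores, so ring maintenance code no longer touches desX fields directly. A sketch of an RX refill step through the hwif.h wrappers (example_refill_rx() is a made-up helper; the buffer is assumed already DMA-mapped):

static void example_refill_rx(struct stmmac_priv *priv, struct dma_desc *p,
			      dma_addr_t buf, bool disable_rx_ic)
{
	stmmac_set_desc_addr(priv, p, buf);	/* des0 or des2, per core */
	stmmac_set_rx_owner(priv, p, disable_rx_ic); /* hand back to DMA */
}
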
dwmac4_get_hw_feature(void __iomem *ioaddr, ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1; dma_cap->number_tx_queues = ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1; + /* PPS output */ + dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24; /* IEEE 1588-2002 */ dma_cap->time_stamp = 0; @@ -379,6 +384,9 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr, /* 5.10 Features */ dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28; + dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13; + dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11; + dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10; } /* Enable/disable TSO feature and set MSS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 8474bf961dd0..c63c1fe3f26b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -184,7 +184,6 @@ #define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 int dwmac4_dma_reset(void __iomem *ioaddr); -void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan); void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan); void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index 860de39999c7..3f4f3132e16b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -7,6 +7,8 @@ #include "common.h" #include "dwmac4.h" #include "dwmac5.h" +#include "stmmac.h" +#include "stmmac_ptp.h" struct dwmac5_error_desc { bool valid; @@ -237,15 +239,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp) return 0; } -bool dwmac5_safety_feat_irq_status(struct net_device *ndev, +int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_stats *stats) { - bool ret = false, err, corr; + bool err, corr; u32 mtl, dma; + int ret = 0; if (!asp) - return false; + return -EINVAL; mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS); dma = readl(ioaddr + DMA_SAFETY_INT_STATUS); @@ -282,17 +285,267 @@ static const struct dwmac5_error { { dwmac5_dma_errors }, }; -const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, - int index, unsigned long *count) +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc) { int module = index / 32, offset = index % 32; unsigned long *ptr = (unsigned long *)stats; if (module >= ARRAY_SIZE(dwmac5_all_errors)) - return NULL; + return -EINVAL; if (!dwmac5_all_errors[module].desc[offset].valid) - return NULL; + return -EINVAL; if (count) *count = *(ptr + index); - return dwmac5_all_errors[module].desc[offset].desc; + if (desc) + *desc = dwmac5_all_errors[module].desc[offset].desc; + return 0; +} + +static int dwmac5_rxp_disable(void __iomem *ioaddr) +{ + u32 val; + int ret; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val &= ~MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); + + ret = readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val, + val & RXPI, 1, 10000); + if (ret) + return ret; + return 0; +} + +static void dwmac5_rxp_enable(void __iomem *ioaddr) +{ + u32 val; + + val = readl(ioaddr + MTL_OPERATION_MODE); + val |= MTL_FRPE; + writel(val, ioaddr + MTL_OPERATION_MODE); +} + +static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr, + struct stmmac_tc_entry *entry, + int pos) +{ + 
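
The added capability fields are plain bitfield extractions from GMAC_HW_FEATURE2/3; the explicit shift-and-mask form used by the driver is equivalent to FIELD_GET(). For comparison only (example_parse_feat2() is made up, the driver keeps the open-coded shifts):

#include <linux/bitfield.h>

static void example_parse_feat2(u32 hw_cap, struct dma_features *dma_cap)
{
	/* same as (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24 above */
	dma_cap->pps_out_num = FIELD_GET(GMAC_HW_FEAT_PPSOUTNUM, hw_cap);
	/* same as ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1 */
	dma_cap->number_tx_queues = FIELD_GET(GMAC_HW_FEAT_TXQCNT, hw_cap) + 1;
}
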
int ret, i; + + for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) { + int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i; + u32 val; + + /* Wait for ready */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + + /* Write data */ + val = *((u32 *)&entry->val + i); + writel(val, ioaddr + MTL_RXP_IACC_DATA); + + /* Write pos */ + val = real_pos & ADDR; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Write OP */ + val |= WRRDN; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Start Write */ + val |= STARTBUSY; + writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS); + + /* Wait for done */ + ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, + val, !(val & STARTBUSY), 1, 10000); + if (ret) + return ret; + } + + return 0; +} + +static struct stmmac_tc_entry * +dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count, + u32 curr_prio) +{ + struct stmmac_tc_entry *entry; + u32 min_prio = ~0x0; + int i, min_prio_idx; + bool found = false; + + for (i = count - 1; i >= 0; i--) { + entry = &entries[i]; + + /* Do not update unused entries */ + if (!entry->in_use) + continue; + /* Do not update already updated entries (i.e. fragments) */ + if (entry->in_hw) + continue; + /* Let last entry be updated last */ + if (entry->is_last) + continue; + /* Do not return fragments */ + if (entry->is_frag) + continue; + /* Check if we already checked this prio */ + if (entry->prio < curr_prio) + continue; + /* Check if this is the minimum prio */ + if (entry->prio < min_prio) { + min_prio = entry->prio; + min_prio_idx = i; + found = true; + } + } + + if (found) + return &entries[min_prio_idx]; + return NULL; +} + +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count) +{ + struct stmmac_tc_entry *entry, *frag; + int i, ret, nve = 0; + u32 curr_prio = 0; + u32 old_val, val; + + /* Force disable RX */ + old_val = readl(ioaddr + GMAC_CONFIG); + val = old_val & ~GMAC_CONFIG_RE; + writel(val, ioaddr + GMAC_CONFIG); + + /* Disable RX Parser */ + ret = dwmac5_rxp_disable(ioaddr); + if (ret) + goto re_enable; + + /* Set all entries as NOT in HW */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + entry->in_hw = false; + } + + /* Update entries by reverse order */ + while (1) { + entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio); + if (!entry) + break; + + curr_prio = entry->prio; + frag = entry->frag_ptr; + + /* Set special fragment requirements */ + if (frag) { + entry->val.af = 0; + entry->val.rf = 0; + entry->val.nc = 1; + entry->val.ok_index = nve + 2; + } + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + entry->in_hw = true; + + if (frag && !frag->in_hw) { + ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve); + if (ret) + goto re_enable; + frag->table_pos = nve++; + frag->in_hw = true; + } + } + + if (!nve) + goto re_enable; + + /* Update all pass entry */ + for (i = 0; i < count; i++) { + entry = &entries[i]; + if (!entry->is_last) + continue; + + ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve); + if (ret) + goto re_enable; + + entry->table_pos = nve++; + } + + /* Assume n. of parsable entries == n. 
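
dwmac5_rxp_update_single_entry() above is the write half of the MTL_RXP_IACC handshake: poll STARTBUSY clear, latch the data word, program the word address, set WRRDN, kick STARTBUSY and poll for completion. Assuming the read direction simply mirrors this with WRRDN left clear (an assumption, not taken from the driver), a read-back helper would look like:

static int example_rxp_read_word(void __iomem *ioaddr, int pos, u32 *data)
{
	u32 val;
	int ret;

	ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, val,
				 !(val & STARTBUSY), 1, 10000);
	if (ret)
		return ret;

	val = pos & ADDR;		/* WRRDN left clear: read operation */
	writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
	writel(val | STARTBUSY, ioaddr + MTL_RXP_IACC_CTRL_STATUS);

	ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS, val,
				 !(val & STARTBUSY), 1, 10000);
	if (ret)
		return ret;

	*data = readl(ioaddr + MTL_RXP_IACC_DATA);
	return 0;
}
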
of valid entries */ + val = (nve << 16) & NPE; + val |= nve & NVE; + writel(val, ioaddr + MTL_RXP_CONTROL_STATUS); + + /* Enable RX Parser */ + dwmac5_rxp_enable(ioaddr); + +re_enable: + /* Re-enable RX */ + writel(old_val, ioaddr + GMAC_CONFIG); + return ret; +} + +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags) +{ + u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + u32 val = readl(ioaddr + MAC_PPS_CONTROL); + u64 period; + + if (!cfg->available) + return -EINVAL; + if (tnsec & TRGTBUSY0) + return -EBUSY; + if (!sub_second_inc || !systime_flags) + return -EINVAL; + + val &= ~PPSx_MASK(index); + + if (!enable) { + val |= PPSCMDx(index, 0x5); + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; + } + + val |= PPSCMDx(index, 0x2); + val |= TRGTMODSELx(index, 0x2); + val |= PPSEN0; + + writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); + + if (!(systime_flags & PTP_TCR_TSCTRLSSR)) + cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465; + writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index)); + + period = cfg->period.tv_sec * 1000000000; + period += cfg->period.tv_nsec; + + do_div(period, sub_second_inc); + + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index)); + + period >>= 1; + if (period <= 1) + return -EINVAL; + + writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); + + /* Finally, activate it */ + writel(val, ioaddr + MAC_PPS_CONTROL); + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h index a0d2c44711b9..775db776b3cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h @@ -11,6 +11,36 @@ #define PRTYEN BIT(1) #define TMOUTEN BIT(0) +#define MAC_PPS_CONTROL 0x00000b70 +#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1) +#define PPS_MINIDX(x) ((x) * 8) +#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x)) +#define MCGRENx(x) BIT(PPS_MAXIDX(x)) +#define TRGTMODSELx(x, val) \ + GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \ + ((val) << (PPS_MAXIDX(x) - 2)) +#define PPSCMDx(x, val) \ + GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \ + ((val) << PPS_MINIDX(x)) +#define PPSEN0 BIT(4) +#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10)) +#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10)) +#define TRGTBUSY0 BIT(31) +#define TTSL0 GENMASK(30, 0) +#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10)) +#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10)) + +#define MTL_RXP_CONTROL_STATUS 0x00000ca0 +#define RXPI BIT(31) +#define NPE GENMASK(23, 16) +#define NVE GENMASK(7, 0) +#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0 +#define STARTBUSY BIT(31) +#define RXPEIEC GENMASK(22, 21) +#define RXPEIEE BIT(20) +#define WRRDN BIT(16) +#define ADDR GENMASK(15, 0) +#define MTL_RXP_IACC_DATA 0x00000cb4 #define MTL_ECC_CONTROL 0x00000cc0 #define TSOEE BIT(4) #define MRXPEE BIT(3) @@ -43,10 +73,15 @@ #define DMA_ECC_INT_STATUS 0x00001088 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp); -bool dwmac5_safety_feat_irq_status(struct net_device *ndev, +int dwmac5_safety_feat_irq_status(struct net_device *ndev, void __iomem *ioaddr, unsigned int asp, struct stmmac_safety_stats *stats); -const char *dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, - int index, unsigned long *count); +int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats, + int 
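
The interval/width math in dwmac5_flex_pps_config() converts the requested period from nanoseconds into PTP sub-second ticks and then halves it for a roughly 50% duty cycle. A worked example, assuming sub_second_inc = 20 ns and a requested 1 ms period (example_pps_ticks() is a made-up illustration):

#include <asm/div64.h>

static u64 example_pps_ticks(void)
{
	u64 period = 1000000;	/* requested 1 ms, in ns */

	do_div(period, 20);	/* divide by sub_second_inc -> 50000 ticks */
	/* MAC_PPSx_INTERVAL is written with period - 1 = 49999 */
	/* MAC_PPSx_WIDTH gets (period >> 1) - 1 = 24999 */
	return period;
}
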
index, unsigned long *count, const char **desc); +int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); +int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); #endif /* __DWMAC5_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 6768a25b6aa0..77914c89d749 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -292,7 +292,7 @@ static void enh_desc_set_tx_owner(struct dma_desc *p) p->des0 |= cpu_to_le32(ETDES0_OWN); } -static void enh_desc_set_rx_owner(struct dma_desc *p) +static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { p->des0 |= cpu_to_le32(RDES0_OWN); } @@ -382,7 +382,7 @@ static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17; } -static u64 enh_desc_get_timestamp(void *desc, u32 ats) +static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts) { u64 ns; @@ -397,7 +397,7 @@ static u64 enh_desc_get_timestamp(void *desc, u32 ats) ns += le32_to_cpu(p->des3) * 1000000000ULL; } - return ns; + *ts = ns; } static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc, @@ -437,6 +437,21 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void enh_desc_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des2); +} + +static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void enh_desc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -457,4 +472,7 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, .display_ring = enh_desc_display_ring, + .get_addr = enh_desc_get_addr, + .set_addr = enh_desc_set_addr, + .clear = enh_desc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c new file mode 100644 index 000000000000..14770fc8865e --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 
+ * stmmac HW Interface Handling + */ + +#include "common.h" +#include "stmmac.h" +#include "stmmac_ptp.h" + +static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg) +{ + u32 reg = readl(priv->ioaddr + id_reg); + + if (!reg) { + dev_info(priv->device, "Version ID not available\n"); + return 0x0; + } + + dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n", + (unsigned int)(reg & GENMASK(15, 8)) >> 8, + (unsigned int)(reg & GENMASK(7, 0))); + return reg & GENMASK(7, 0); +} + +static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->chain_mode) { + dev_info(priv->device, "Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + mac->mode = &chain_mode_ops; + } else { + dev_info(priv->device, "Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + mac->mode = &ring_mode_ops; + } +} + +static int stmmac_dwmac1_quirks(struct stmmac_priv *priv) +{ + struct mac_device_info *mac = priv->hw; + + if (priv->plat->enh_desc) { + dev_info(priv->device, "Enhanced/Alternate descriptors\n"); + + /* GMAC older than 3.50 has no extended descriptors */ + if (priv->synopsys_id >= DWMAC_CORE_3_50) { + dev_info(priv->device, "Enabled extended descriptors\n"); + priv->extend_desc = 1; + } else { + dev_warn(priv->device, "Extended descriptors not supported\n"); + } + + mac->desc = &enh_desc_ops; + } else { + dev_info(priv->device, "Normal descriptors\n"); + mac->desc = &ndesc_ops; + } + + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static int stmmac_dwmac4_quirks(struct stmmac_priv *priv) +{ + stmmac_dwmac_mode_quirk(priv); + return 0; +} + +static const struct stmmac_hwif_entry { + bool gmac; + bool gmac4; + u32 min_id; + const struct stmmac_regs_off regs; + const void *desc; + const void *dma; + const void *mac; + const void *hwtimestamp; + const void *mode; + const void *tc; + int (*setup)(struct stmmac_priv *priv); + int (*quirks)(struct stmmac_priv *priv); +} stmmac_hw[] = { + /* NOTE: New HW versions shall go to the end of this table */ + { + .gmac = false, + .gmac4 = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac100_dma_ops, + .mac = &dwmac100_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .setup = dwmac100_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = true, + .gmac4 = false, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC3_X_OFFSET, + .mmc_off = MMC_GMAC3_X_OFFSET, + }, + .desc = NULL, + .dma = &dwmac1000_dma_ops, + .mac = &dwmac1000_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .setup = dwmac1000_setup, + .quirks = stmmac_dwmac1_quirks, + }, { + .gmac = false, + .gmac4 = true, + .min_id = 0, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac4_ops, + .hwtimestamp = &stmmac_ptp, + .mode = NULL, + .tc = NULL, + .setup = dwmac4_setup, + .quirks = stmmac_dwmac4_quirks, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_4_00, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac4_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = NULL, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_4_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + 
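
stmmac_get_id() above splits the version register into a user ID (bits 15:8) and the Synopsys core ID (bits 7:0) that drives the table walk below; for example, a hypothetical reading of 0x3051 decodes as user ID 0x30 and Synopsys ID 0x51, i.e. DWMAC_CORE_5_10, so the newest matching table entry wins. The decode, sketched:

static void example_decode_version(u32 reg)
{
	u32 uid = (reg & GENMASK(15, 8)) >> 8;	/* vendor/user ID */
	u32 synid = reg & GENMASK(7, 0);	/* 0x51 == DWMAC_CORE_5_10 */

	pr_info("User ID: 0x%x, Synopsys ID: 0x%x\n", uid, synid);
}
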
.dma = &dwmac410_dma_ops, + .mac = &dwmac410_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = NULL, + .setup = dwmac4_setup, + .quirks = NULL, + }, { + .gmac = false, + .gmac4 = true, + .min_id = DWMAC_CORE_5_10, + .regs = { + .ptp_off = PTP_GMAC4_OFFSET, + .mmc_off = MMC_GMAC4_OFFSET, + }, + .desc = &dwmac4_desc_ops, + .dma = &dwmac410_dma_ops, + .mac = &dwmac510_ops, + .hwtimestamp = &stmmac_ptp, + .mode = &dwmac4_ring_mode_ops, + .tc = &dwmac510_tc_ops, + .setup = dwmac4_setup, + .quirks = NULL, + } +}; + +int stmmac_hwif_init(struct stmmac_priv *priv) +{ + bool needs_gmac4 = priv->plat->has_gmac4; + bool needs_gmac = priv->plat->has_gmac; + const struct stmmac_hwif_entry *entry; + struct mac_device_info *mac; + bool needs_setup = true; + int i, ret; + u32 id; + + if (needs_gmac) { + id = stmmac_get_id(priv, GMAC_VERSION); + } else if (needs_gmac4) { + id = stmmac_get_id(priv, GMAC4_VERSION); + } else { + id = 0; + } + + /* Save ID for later use */ + priv->synopsys_id = id; + + /* Lets assume some safe values first */ + priv->ptpaddr = priv->ioaddr + + (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET); + priv->mmcaddr = priv->ioaddr + + (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET); + + /* Check for HW specific setup first */ + if (priv->plat->setup) { + mac = priv->plat->setup(priv); + needs_setup = false; + } else { + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + } + + if (!mac) + return -ENOMEM; + + /* Fallback to generic HW */ + for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) { + entry = &stmmac_hw[i]; + + if (needs_gmac ^ entry->gmac) + continue; + if (needs_gmac4 ^ entry->gmac4) + continue; + /* Use synopsys_id var because some setups can override this */ + if (priv->synopsys_id < entry->min_id) + continue; + + /* Only use generic HW helpers if needed */ + mac->desc = mac->desc ? : entry->desc; + mac->dma = mac->dma ? : entry->dma; + mac->mac = mac->mac ? : entry->mac; + mac->ptp = mac->ptp ? : entry->hwtimestamp; + mac->mode = mac->mode ? : entry->mode; + mac->tc = mac->tc ? : entry->tc; + + priv->hw = mac; + priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off; + priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off; + + /* Entry found */ + if (needs_setup) { + ret = entry->setup(priv); + if (ret) + return ret; + } + + /* Run quirks, if needed */ + if (entry->quirks) { + ret = entry->quirks(priv); + if (ret) + return ret; + } + + return 0; + } + + dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n", + id, needs_gmac, needs_gmac4); + return -EINVAL; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h new file mode 100644 index 000000000000..e44e7b26ce82 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. +// stmmac HW Interface Callbacks + +#ifndef __STMMAC_HWIF_H__ +#define __STMMAC_HWIF_H__ + +#include <linux/netdevice.h> + +#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \ + (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result = 0; \ + } \ + __result; \ +}) +#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) 
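
Because stmmac_hwif_init() walks the table from newest to oldest and only fills the ops a platform left NULL (mac->desc = mac->desc ? : entry->desc, and so on), a glue driver's plat->setup can override selectively. A hypothetical sketch in the style of sun8i_dwmac_setup(), with my_dma_ops standing in for a custom ops table:

extern const struct stmmac_dma_ops my_dma_ops;	/* hypothetical override */

static struct mac_device_info *example_plat_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
	if (!mac)
		return NULL;

	mac->dma = &my_dma_ops;	/* keep generic desc/mac ops from the table */
	return mac;
}
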
\ +({ \ + int __result = -EINVAL; \ + if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \ + __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \ + __result; \ +}) + +struct stmmac_extra_stats; +struct stmmac_safety_stats; +struct dma_desc; +struct dma_extended_desc; + +/* Descriptors helpers */ +struct stmmac_desc_ops { + /* DMA RX descriptor ring initialization */ + void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, + int end); + /* DMA TX descriptor ring initialization */ + void (*init_tx_desc)(struct dma_desc *p, int mode, int end); + /* Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len, + bool csum_flag, int mode, bool tx_own, bool ls, + unsigned int tot_pkt_len); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, unsigned int tcphdrlen, + unsigned int tcppayloadlen); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner)(struct dma_desc *p); + int (*get_tx_owner)(struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc)(struct dma_desc *p, int mode); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*set_tx_ic)(struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls)(struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, void __iomem *ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len)(struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic); + /* Get the receive frame size */ + int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type); + /* Return the reception status looking at the RDES1 */ + int (*rx_status)(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x, + struct dma_extended_desc *p); + /* Set tx timestamp enable bit */ + void (*enable_tx_timestamp) (struct dma_desc *p); + /* get tx timestamp status */ + int (*get_tx_timestamp_status) (struct dma_desc *p); + /* get timestamp value */ + void (*get_timestamp)(void *desc, u32 ats, u64 *ts); + /* get rx timestamp status */ + int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); + /* get descriptor skbuff address */ + void (*get_addr)(struct dma_desc *p, unsigned int *addr); + /* set descriptor skbuff address */ + void (*set_addr)(struct dma_desc *p, dma_addr_t addr); + /* clear descriptor */ + void (*clear)(struct dma_desc *p); +}; + +#define stmmac_init_rx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_rx_desc, __args) +#define stmmac_init_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, init_tx_desc, __args) +#define stmmac_prepare_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args) +#define stmmac_prepare_tso_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args) +#define stmmac_set_tx_owner(__priv, __args...) 
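
stmmac_do_callback() evaluates to -EINVAL whenever the current core does not provide the op, and stmmac_do_void_callback() returns 0 only if the op actually ran, so call sites can probe optional features without open-coded NULL checks. For instance, using the stmmac_rx_watchdog() wrapper defined further below (example_try_riwt() is made up, MAX_DMA_RIWT as defined in the driver):

static void example_try_riwt(struct stmmac_priv *priv, u32 rx_cnt)
{
	/* 0 if the op exists and ran, -EINVAL if this core lacks it */
	if (stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt))
		dev_dbg(priv->device, "no rx_watchdog op on this core\n");
}
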
\ + stmmac_do_void_callback(__priv, desc, set_tx_owner, __args) +#define stmmac_get_tx_owner(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_owner, __args) +#define stmmac_release_tx_desc(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, release_tx_desc, __args) +#define stmmac_set_tx_ic(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_tx_ic, __args) +#define stmmac_get_tx_ls(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_ls, __args) +#define stmmac_tx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, tx_status, __args) +#define stmmac_get_tx_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_len, __args) +#define stmmac_set_rx_owner(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_rx_owner, __args) +#define stmmac_get_rx_frame_len(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_frame_len, __args) +#define stmmac_rx_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, rx_status, __args) +#define stmmac_rx_extended_status(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, rx_extended_status, __args) +#define stmmac_enable_tx_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args) +#define stmmac_get_tx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args) +#define stmmac_get_timestamp(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_timestamp, __args) +#define stmmac_get_rx_timestamp_status(__priv, __args...) \ + stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args) +#define stmmac_display_ring(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, display_ring, __args) +#define stmmac_set_mss(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_mss, __args) +#define stmmac_get_desc_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, get_addr, __args) +#define stmmac_set_desc_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, desc, set_addr, __args) +#define stmmac_clear_desc(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, desc, clear, __args) + +struct stmmac_dma_cfg; +struct dma_features; + +/* Specific DMA helpers */ +struct stmmac_dma_ops { + /* DMA core initialization */ + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, + int atds); + void (*init_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, u32 chan); + void (*init_rx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_rx_phy, u32 chan); + void (*init_tx_chan)(void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 dma_tx_phy, u32 chan); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); + /* Dump DMA registers */ + void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space); + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz, u8 qmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + void __iomem *ioaddr); + void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan); + void (*start_tx)(void __iomem *ioaddr, u32 chan); + void (*stop_tx)(void __iomem *ioaddr, u32 chan); + void (*start_rx)(void __iomem *ioaddr, u32 chan); + void (*stop_rx)(void __iomem *ioaddr, u32 chan); + int (*dma_interrupt) (void __iomem *ioaddr, + struct stmmac_extra_stats *x, u32 chan); + /* If supported then get the optional core features */ + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); + /* Program the HW RX Watchdog */ + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); +}; + +#define stmmac_reset(__priv, __args...) \ + stmmac_do_callback(__priv, dma, reset, __args) +#define stmmac_dma_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init, __args) +#define stmmac_init_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_chan, __args) +#define stmmac_init_rx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_rx_chan, __args) +#define stmmac_init_tx_chan(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, init_tx_chan, __args) +#define stmmac_axi(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, axi, __args) +#define stmmac_dump_dma_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dump_regs, __args) +#define stmmac_dma_rx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args) +#define stmmac_dma_tx_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args) +#define stmmac_dma_diagnostic_fr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args) +#define stmmac_enable_dma_transmission(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args) +#define stmmac_enable_dma_irq(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args) +#define stmmac_disable_dma_irq(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args) +#define stmmac_start_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_tx, __args) +#define stmmac_stop_tx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_tx, __args) +#define stmmac_start_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, start_rx, __args) +#define stmmac_stop_rx(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, stop_rx, __args) +#define stmmac_dma_interrupt_status(__priv, __args...) \ + stmmac_do_callback(__priv, dma, dma_interrupt, __args) +#define stmmac_get_hw_feature(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, get_hw_feature, __args) +#define stmmac_rx_watchdog(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, rx_watchdog, __args) +#define stmmac_set_tx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args) +#define stmmac_set_rx_ring_len(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args) +#define stmmac_set_rx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args) +#define stmmac_set_tx_tail_ptr(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) +#define stmmac_enable_tso(__priv, __args...) \ + stmmac_do_void_callback(__priv, dma, enable_tso, __args) + +struct mac_device_info; +struct net_device; +struct rgmii_adv; +struct stmmac_safety_stats; +struct stmmac_tc_entry; +struct stmmac_pps_cfg; + +/* Helpers to program the MAC core */ +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init)(struct mac_device_info *hw, struct net_device *dev); + /* Enable the MAC RX/TX */ + void (*set_mac)(void __iomem *ioaddr, bool enable); + /* Enable and verify that the IPC module is supported */ + int (*rx_ipc)(struct mac_device_info *hw); + /* Enable RX Queues */ + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue); + /* RX Queues Priority */ + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* TX Queues Priority */ + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue); + /* RX Queues Routing */ + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet, + u32 queue); + /* Program RX Algorithms */ + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg); + /* Program TX Algorithms */ + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg); + /* Set MTL TX queues weight */ + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw, + u32 weight, u32 queue); + /* RX MTL queue to RX dma mapping */ + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan); + /* Configure AV Algorithm */ + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope, + u32 idle_slope, u32 high_credit, u32 low_credit, + u32 queue); + /* Dump MAC registers */ + void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space); + /* Handle extra events on specific interrupts hw dependent */ + int (*host_irq_status)(struct mac_device_info *hw, + struct stmmac_extra_stats *x); + /* Handle MTL interrupts */ + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan); + /* Multicast filter setting */ + void (*set_filter)(struct mac_device_info *hw, struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl)(struct mac_device_info *hw, unsigned int 
duplex, + unsigned int fc, unsigned int pause_time, u32 tx_cnt); + /* Set power management mode (e.g. magic frame) */ + void (*pmt)(struct mac_device_info *hw, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, + unsigned int reg_n); + void (*set_eee_mode)(struct mac_device_info *hw, + bool en_tx_lpi_clockgating); + void (*reset_eee_mode)(struct mac_device_info *hw); + void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); + void (*set_eee_pls)(struct mac_device_info *hw, int link); + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x, + u32 rx_queues, u32 tx_queues); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); + /* Safety Features */ + int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp); + int (*safety_feat_irq_status)(struct net_device *ndev, + void __iomem *ioaddr, unsigned int asp, + struct stmmac_safety_stats *stats); + int (*safety_feat_dump)(struct stmmac_safety_stats *stats, + int index, unsigned long *count, const char **desc); + /* Flexible RX Parser */ + int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries, + unsigned int count); + /* Flexible PPS */ + int (*flex_pps_config)(void __iomem *ioaddr, int index, + struct stmmac_pps_cfg *cfg, bool enable, + u32 sub_second_inc, u32 systime_flags); +}; + +#define stmmac_core_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, core_init, __args) +#define stmmac_mac_set(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mac, __args) +#define stmmac_rx_ipc(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rx_ipc, __args) +#define stmmac_rx_queue_enable(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args) +#define stmmac_rx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args) +#define stmmac_tx_queue_prio(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args) +#define stmmac_rx_queue_routing(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args) +#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args) +#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args) +#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args) +#define stmmac_map_mtl_to_dma(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args) +#define stmmac_config_cbs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, config_cbs, __args) +#define stmmac_dump_mac_regs(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, dump_regs, __args) +#define stmmac_host_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_irq_status, __args) +#define stmmac_host_mtl_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args) +#define stmmac_set_filter(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_filter, __args) +#define stmmac_flow_ctrl(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, mac, flow_ctrl, __args) +#define stmmac_pmt(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pmt, __args) +#define stmmac_set_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) +#define stmmac_get_umac_addr(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) +#define stmmac_set_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) +#define stmmac_reset_eee_mode(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) +#define stmmac_set_eee_timer(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) +#define stmmac_set_eee_pls(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, set_eee_pls, __args) +#define stmmac_mac_debug(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, debug, __args) +#define stmmac_pcs_ctrl_ane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) +#define stmmac_pcs_rane(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_rane, __args) +#define stmmac_pcs_get_adv_lp(__priv, __args...) \ + stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) +#define stmmac_safety_feat_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_config, __args) +#define stmmac_safety_feat_irq_status(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args) +#define stmmac_safety_feat_dump(__priv, __args...) \ + stmmac_do_callback(__priv, mac, safety_feat_dump, __args) +#define stmmac_rxp_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, rxp_config, __args) +#define stmmac_flex_pps_config(__priv, __args...) \ + stmmac_do_callback(__priv, mac, flex_pps_config, __args) + +/* PTP and HW Timer helpers */ +struct stmmac_hwtimestamp { + void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); + void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4, u32 *ssinc); + int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); + int (*config_addend) (void __iomem *ioaddr, u32 addend); + int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4); + void (*get_systime) (void __iomem *ioaddr, u64 *systime); +}; + +#define stmmac_config_hw_tstamping(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args) +#define stmmac_config_sub_second_increment(__priv, __args...) \ + stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args) +#define stmmac_init_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, init_systime, __args) +#define stmmac_config_addend(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, config_addend, __args) +#define stmmac_adjust_systime(__priv, __args...) \ + stmmac_do_callback(__priv, ptp, adjust_systime, __args) +#define stmmac_get_systime(__priv, __args...) 
\ + stmmac_do_void_callback(__priv, ptp, get_systime, __args) + +/* Helpers to manage the descriptors for chain and ring modes */ +struct stmmac_mode_ops { + void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, + unsigned int extend_desc); + unsigned int (*is_jumbo_frm) (int len, int ehn_desc); + int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); + void (*refill_desc3) (void *priv, struct dma_desc *p); + void (*clean_desc3) (void *priv, struct dma_desc *p); +}; + +#define stmmac_mode_init(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init, __args) +#define stmmac_is_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, is_jumbo_frm, __args) +#define stmmac_jumbo_frm(__priv, __args...) \ + stmmac_do_callback(__priv, mode, jumbo_frm, __args) +#define stmmac_set_16kib_bfsize(__priv, __args...) \ + stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args) +#define stmmac_init_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, init_desc3, __args) +#define stmmac_refill_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, refill_desc3, __args) +#define stmmac_clean_desc3(__priv, __args...) \ + stmmac_do_void_callback(__priv, mode, clean_desc3, __args) + +struct stmmac_priv; +struct tc_cls_u32_offload; + +struct stmmac_tc_ops { + int (*init)(struct stmmac_priv *priv); + int (*setup_cls_u32)(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls); +}; + +#define stmmac_tc_init(__priv, __args...) \ + stmmac_do_callback(__priv, tc, init, __args) +#define stmmac_tc_setup_cls_u32(__priv, __args...) \ + stmmac_do_callback(__priv, tc, setup_cls_u32, __args) + +struct stmmac_regs_off { + u32 ptp_off; + u32 mmc_off; +}; + +extern const struct stmmac_ops dwmac100_ops; +extern const struct stmmac_dma_ops dwmac100_dma_ops; +extern const struct stmmac_ops dwmac1000_ops; +extern const struct stmmac_dma_ops dwmac1000_dma_ops; +extern const struct stmmac_ops dwmac4_ops; +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_ops dwmac410_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +extern const struct stmmac_ops dwmac510_ops; +extern const struct stmmac_tc_ops dwmac510_tc_ops; + +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */ + +int stmmac_hwif_init(struct stmmac_priv *priv); + +#endif /* __STMMAC_HWIF_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index ebd9e5e00f16..de65bb29feba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -168,7 +168,7 @@ static void ndesc_set_tx_owner(struct dma_desc *p) p->des0 |= cpu_to_le32(TDES0_OWN); } -static void ndesc_set_rx_owner(struct dma_desc *p) +static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic) { p->des0 |= cpu_to_le32(RDES0_OWN); } @@ -253,7 +253,7 @@ static int ndesc_get_tx_timestamp_status(struct dma_desc *p) return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17; } -static u64 ndesc_get_timestamp(void *desc, u32 ats) +static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns; @@ -262,7 +262,7 @@ static u64 ndesc_get_timestamp(void *desc, u32 ats) /* convert high/sec time stamp value to nanosecond */ ns += le32_to_cpu(p->des3) * 1000000000ULL; - return ns; + *ts = ns; } static int 
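Several hooks switch from returning a value to writing through an out-parameter in this patch: ndesc_get_timestamp() just above goes from returning u64 to filling a u64 *ts, and get_systime() later gets the same treatment. One plausible reason is uniformity, so that every hook can sit behind the same generic wrapper regardless of what it computes. A small sketch of the conversion, with invented demo_* names and a fake two-word descriptor standing in for des2/des3:

/*
 * Out-parameter conversion sketch. demo_* names and the fake
 * descriptor layout are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

/* Old shape: the timestamp is the return value. */
static uint64_t demo_get_timestamp_old(const uint32_t *desc)
{
	return desc[2] + desc[3] * 1000000000ULL;
}

/* New shape: same arithmetic, result delivered via *ts. */
static void demo_get_timestamp_new(const uint32_t *desc, uint64_t *ts)
{
	*ts = desc[2] + desc[3] * 1000000000ULL;
}

int main(void)
{
	uint32_t desc[4] = { 0, 0, 500, 2 };	/* 2 s + 500 ns */
	uint64_t ts;

	demo_get_timestamp_new(desc, &ts);
	printf("old=%llu new=%llu\n",
	       (unsigned long long)demo_get_timestamp_old(desc),
	       (unsigned long long)ts);
	return 0;
}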
ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats) @@ -297,6 +297,21 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) pr_info("\n"); } +static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr) +{ + *addr = le32_to_cpu(p->des2); +} + +static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr) +{ + p->des2 = cpu_to_le32(addr); +} + +static void ndesc_clear(struct dma_desc *p) +{ + p->des2 = 0; +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -316,4 +331,7 @@ const struct stmmac_desc_ops ndesc_ops = { .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, .display_ring = ndesc_display_ring, + .get_addr = ndesc_get_addr, + .set_addr = ndesc_set_addr, + .clear = ndesc_clear, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 28e4b5d50ce6..a7ffc73fffe8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -24,7 +24,7 @@ #include "stmmac.h" -static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) +static int jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p; unsigned int nopaged_len = skb_headlen(skb); @@ -58,9 +58,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, - STMMAC_RING_MODE, 0, - false, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, + STMMAC_RING_MODE, 0, false, skb->len); tx_q->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -79,9 +78,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_RING_MODE, 1, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum, + STMMAC_RING_MODE, 1, true, skb->len); } else { des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); @@ -92,9 +90,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) tx_q->tx_skbuff_dma[entry].len = nopaged_len; tx_q->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, - STMMAC_RING_MODE, 0, - true, skb->len); + stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, + STMMAC_RING_MODE, 0, true, skb->len); } tx_q->cur_tx = entry; @@ -102,7 +99,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return entry; } -static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) +static unsigned int is_jumbo_frm(int len, int enh_desc) { unsigned int ret = 0; @@ -112,7 +109,7 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) return ret; } -static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) +static void refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; @@ -122,12 +119,12 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) } /* In ring mode we need to fill the desc3 because it is used as buffer */ -static void stmmac_init_desc3(struct dma_desc *p) +static void init_desc3(struct dma_desc *p) { p->des3 = 
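The new get_addr/set_addr/clear descriptor hooks (added to ndesc_ops above and consumed later in stmmac_main.c as stmmac_set_desc_addr()/stmmac_clear_desc()) hide which descriptor word holds the buffer address: normal descriptors keep it in des2, while the DWMAC4 layout keeps it in des0, judging by the synopsys_id branches this patch deletes from the callers. A sketch of that per-core indirection; all demo_* names are invented:

/*
 * Per-core descriptor-address indirection, sketched. The des0/des2
 * split mirrors the branches removed from stmmac_main.c.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_desc { uint32_t des0, des1, des2, des3; };

struct demo_desc_ops {
	void (*set_addr)(struct demo_desc *p, uint32_t addr);
};

/* GMAC3.x-style (ndesc/enh_desc): buffer address lives in des2. */
static void demo_ndesc_set_addr(struct demo_desc *p, uint32_t addr)
{
	p->des2 = addr;
}

/* DWMAC4-style: buffer address lives in des0. */
static void demo_dwmac4_set_addr(struct demo_desc *p, uint32_t addr)
{
	p->des0 = addr;
}

int main(void)
{
	struct demo_desc_ops ops = { .set_addr = demo_ndesc_set_addr };
	struct demo_desc d = { 0 };

	ops.set_addr(&d, 0x1000);	/* caller never tests the core id */
	printf("des0=%x des2=%x\n", (unsigned)d.des0, (unsigned)d.des2);

	ops.set_addr = demo_dwmac4_set_addr;
	ops.set_addr(&d, 0x2000);
	printf("des0=%x des2=%x\n", (unsigned)d.des0, (unsigned)d.des2);
	return 0;
}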
cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); } -static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) +static void clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr; struct stmmac_priv *priv = tx_q->priv_data; @@ -140,7 +137,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) p->des3 = 0; } -static int stmmac_set_16kib_bfsize(int mtu) +static int set_16kib_bfsize(int mtu) { int ret = 0; if (unlikely(mtu >= BUF_SIZE_8KiB)) @@ -149,10 +146,10 @@ static int stmmac_set_16kib_bfsize(int mtu) } const struct stmmac_mode_ops ring_mode_ops = { - .is_jumbo_frm = stmmac_is_jumbo_frm, - .jumbo_frm = stmmac_jumbo_frm, - .refill_desc3 = stmmac_refill_desc3, - .init_desc3 = stmmac_init_desc3, - .clean_desc3 = stmmac_clean_desc3, - .set_16kib_bfsize = stmmac_set_16kib_bfsize, + .is_jumbo_frm = is_jumbo_frm, + .jumbo_frm = jumbo_frm, + .refill_desc3 = refill_desc3, + .init_desc3 = init_desc3, + .clean_desc3 = clean_desc3, + .set_16kib_bfsize = set_16kib_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index da50451f8999..025efbf6145c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -76,11 +76,43 @@ struct stmmac_rx_queue { struct napi_struct napi ____cacheline_aligned_in_smp; }; +struct stmmac_tc_entry { + bool in_use; + bool in_hw; + bool is_last; + bool is_frag; + void *frag_ptr; + unsigned int table_pos; + u32 handle; + u32 prio; + struct { + u32 match_data; + u32 match_en; + u8 af:1; + u8 rf:1; + u8 im:1; + u8 nc:1; + u8 res1:4; + u8 frame_offset; + u8 ok_index; + u8 dma_ch_no; + u32 res2; + } __packed val; +}; + +#define STMMAC_PPS_MAX 4 +struct stmmac_pps_cfg { + bool available; + struct timespec64 start; + struct timespec64 period; +}; + struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; + bool tx_timer_armed; int tx_coalesce; int hwts_tx_en; @@ -97,7 +129,7 @@ struct stmmac_priv { struct net_device *dev; struct device *device; struct mac_device_info *hw; - spinlock_t lock; + struct mutex lock; /* RX Queue */ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES]; @@ -130,10 +162,13 @@ struct stmmac_priv { int eee_active; int tx_lpi_timer; unsigned int mode; + unsigned int chain_mode; int extend_desc; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; + u32 sub_second_inc; + u32 systime_flags; u32 adv_ts; int use_riwt; int irq_wake; @@ -150,6 +185,14 @@ struct stmmac_priv { unsigned long state; struct workqueue_struct *wq; struct work_struct service_task; + + /* TC Handling */ + unsigned int tc_entries_max; + unsigned int tc_off_max; + struct stmmac_tc_entry *tc_entries; + + /* Pulse Per Second output */ + struct stmmac_pps_cfg pps[STMMAC_PPS_MAX]; }; enum stmmac_state { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 2c6ed47704fc..5710864fa809 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -291,11 +291,9 @@ static int stmmac_ethtool_get_link_ksettings(struct net_device *dev, cmd->base.speed = priv->xstats.pcs_speed; /* Get and convert ADV/LP_ADV from the HW AN registers */ - if (!priv->hw->mac->pcs_get_adv_lp) + if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv)) return 
-EOPNOTSUPP; /* should never happen indeed */ - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv); - /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ ethtool_convert_link_mode_to_legacy_u32( @@ -392,13 +390,9 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full); - spin_lock(&priv->lock); - - if (priv->hw->mac->pcs_ctrl_ane) - priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, - priv->hw->ps, 0); - - spin_unlock(&priv->lock); + mutex_lock(&priv->lock); + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); + mutex_unlock(&priv->lock); return 0; } @@ -442,8 +436,8 @@ static void stmmac_ethtool_gregs(struct net_device *dev, memset(reg_space, 0x0, REG_SPACE_SIZE); - priv->hw->mac->dump_regs(priv->hw, reg_space); - priv->hw->dma->dump_regs(priv->ioaddr, reg_space); + stmmac_dump_mac_regs(priv, priv->hw, reg_space); + stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); /* Copy DMA registers to where ethtool expects them */ memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); @@ -454,15 +448,13 @@ stmmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct stmmac_priv *priv = netdev_priv(netdev); + struct rgmii_adv adv_lp; pause->rx_pause = 0; pause->tx_pause = 0; - if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { - struct rgmii_adv adv_lp; - + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { pause->autoneg = 1; - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); if (!adv_lp.pause) return; } else { @@ -488,12 +480,10 @@ stmmac_set_pauseparam(struct net_device *netdev, u32 tx_cnt = priv->plat->tx_queues_to_use; struct phy_device *phy = netdev->phydev; int new_pause = FLOW_OFF; + struct rgmii_adv adv_lp; - if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { - struct rgmii_adv adv_lp; - + if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) { pause->autoneg = 1; - priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); if (!adv_lp.pause) return -EOPNOTSUPP; } else { @@ -515,37 +505,32 @@ stmmac_set_pauseparam(struct net_device *netdev, return phy_start_aneg(phy); } - priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, phy->duplex, priv->flow_ctrl, + priv->pause, tx_cnt); return 0; } static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); struct stmmac_priv *priv = netdev_priv(dev); u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; unsigned long count; - int i, j = 0; - - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; + int i, j = 0, ret; + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - if (dump(&priv->sstats, i, &count)) + if (!stmmac_safety_feat_dump(priv, &priv->sstats, i, + &count, NULL)) data[j++] = count; } } /* Update the DMA HW counters for dwmac10/100 */ - if (priv->hw->dma->dma_diagnostic_fr) - priv->hw->dma->dma_diagnostic_fr(&dev->stats, - (void *) &priv->xstats, - priv->ioaddr); - else { + ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats, + priv->ioaddr); + if (ret) { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { dwmac_mmc_read(priv->mmcaddr, &priv->mmc); @@ -565,11 +550,10 @@ static 
void stmmac_get_ethtool_stats(struct net_device *dev, priv->xstats.phy_eee_wakeup_error_n = val; } - if ((priv->hw->mac->debug) && - (priv->synopsys_id >= DWMAC_CORE_3_50)) - priv->hw->mac->debug(priv->ioaddr, - (void *)&priv->xstats, - rx_queues_count, tx_queues_count); + if (priv->synopsys_id >= DWMAC_CORE_3_50) + stmmac_mac_debug(priv, priv->ioaddr, + (void *)&priv->xstats, + rx_queues_count, tx_queues_count); } for (i = 0; i < STMMAC_STATS_LEN; i++) { char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; @@ -581,8 +565,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); int i, len, safety_len = 0; switch (sset) { @@ -591,11 +573,11 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; - + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - if (dump(&priv->sstats, i, NULL)) + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, NULL)) safety_len++; } @@ -613,17 +595,15 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) int i; u8 *p = data; struct stmmac_priv *priv = netdev_priv(dev); - const char *(*dump)(struct stmmac_safety_stats *stats, int index, - unsigned long *count); switch (stringset) { case ETH_SS_STATS: - if (priv->dma_cap.asp && priv->hw->mac->safety_feat_dump) { - dump = priv->hw->mac->safety_feat_dump; + if (priv->dma_cap.asp) { for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) { - const char *desc = dump(&priv->sstats, i, NULL); - - if (desc) { + const char *desc; + if (!stmmac_safety_feat_dump(priv, + &priv->sstats, i, + NULL, &desc)) { memcpy(p, desc, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } @@ -652,12 +632,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; wol->wolopts = priv->wolopts; } - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); } static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -686,9 +666,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) disable_irq_wake(priv->wol_irq); } - spin_lock_irq(&priv->lock); + mutex_lock(&priv->lock); priv->wolopts = wol->wolopts; - spin_unlock_irq(&priv->lock); + mutex_unlock(&priv->lock); return 0; } @@ -810,7 +790,7 @@ static int stmmac_set_coalesce(struct net_device *dev, priv->tx_coal_frames = ec->tx_max_coalesced_frames; priv->tx_coal_timer = ec->tx_coalesce_usecs; priv->rx_riwt = rx_riwt; - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt); + stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 08c19ebd5306..8d9cc2157afd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -24,13 +24,13 @@ #include "common.h" #include "stmmac_ptp.h" -static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) +static void 
config_hw_tstamping(void __iomem *ioaddr, u32 data) { writel(data, ioaddr + PTP_TCR); } -static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, - u32 ptp_clock, int gmac4) +static void config_sub_second_increment(void __iomem *ioaddr, + u32 ptp_clock, int gmac4, u32 *ssinc) { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; @@ -57,10 +57,11 @@ static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, writel(reg_value, ioaddr + PTP_SSIR); - return data; + if (ssinc) + *ssinc = data; } -static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) +static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) { int limit; u32 value; @@ -85,7 +86,7 @@ static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec) return 0; } -static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) +static int config_addend(void __iomem *ioaddr, u32 addend) { u32 value; int limit; @@ -109,8 +110,8 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) return 0; } -static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub, int gmac4) +static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, + int add_sub, int gmac4) { u32 value; int limit; @@ -152,7 +153,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, return 0; } -static u64 stmmac_get_systime(void __iomem *ioaddr) +static void get_systime(void __iomem *ioaddr, u64 *systime) { u64 ns; @@ -161,14 +162,15 @@ static u64 stmmac_get_systime(void __iomem *ioaddr) /* Get the TSS and convert sec time value to nanosecond */ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; - return ns; + if (systime) + *systime = ns; } const struct stmmac_hwtimestamp stmmac_ptp = { - .config_hw_tstamping = stmmac_config_hw_tstamping, - .init_systime = stmmac_init_systime, - .config_sub_second_increment = stmmac_config_sub_second_increment, - .config_addend = stmmac_config_addend, - .adjust_systime = stmmac_adjust_systime, - .get_systime = stmmac_get_systime, + .config_hw_tstamping = config_hw_tstamping, + .init_systime = init_systime, + .config_sub_second_increment = config_sub_second_increment, + .config_addend = config_addend, + .adjust_systime = adjust_systime, + .get_systime = get_systime, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b65e2d144698..11fb7c777d89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -45,11 +45,13 @@ #include <linux/seq_file.h> #endif /* CONFIG_DEBUG_FS */ #include <linux/net_tstamp.h> +#include <net/pkt_cls.h> #include "stmmac_ptp.h" #include "stmmac.h" #include <linux/reset.h> #include <linux/of_mdio.h> #include "dwmac1000.h" +#include "hwif.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) @@ -335,8 +337,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) - priv->hw->mac->set_eee_mode(priv->hw, - priv->plat->en_tx_lpi_clockgating); + stmmac_set_eee_mode(priv, priv->hw, + priv->plat->en_tx_lpi_clockgating); } /** @@ -347,7 +349,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv) */ void stmmac_disable_eee_mode(struct stmmac_priv *priv) { - priv->hw->mac->reset_eee_mode(priv->hw); + stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); priv->tx_path_in_lpi_mode = false; } @@ -379,7 +381,6 @@ bool 
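The hwtstamp helpers above (and the ring-mode helpers earlier) lose their stmmac_ prefix in this patch. The natural reading is a name clash: hwif.h now defines function-like macros named stmmac_config_addend(), stmmac_jumbo_frm() and so on, and the preprocessor would mangle any function definition spelled the same way. A tiny demonstration of the clash, with invented demo_* names:

/*
 * Why same-named static functions had to be renamed: a function-like
 * macro rewrites any later definition that reuses its name.
 */
#include <stdio.h>

struct demo_ops { int (*reset)(void *ioaddr); };
struct demo_priv { const struct demo_ops *ops; };

#define demo_reset(__priv, __args...) \
	((__priv)->ops->reset(__args))

/*
 * Must not be called demo_reset: "static int demo_reset(void *ioaddr)"
 * would be expanded by the macro above into nonsense before the
 * compiler ever saw it.
 */
static int demo_plain_reset(void *ioaddr)
{
	(void)ioaddr;
	return 0;
}

int main(void)
{
	struct demo_ops ops = { .reset = demo_plain_reset };
	struct demo_priv priv = { .ops = &ops };

	printf("reset: %d\n", demo_reset(&priv, NULL));
	return 0;
}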
stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; int interface = priv->plat->interface; - unsigned long flags; bool ret = false; if ((interface != PHY_INTERFACE_MODE_MII) && @@ -406,19 +407,19 @@ bool stmmac_eee_init(struct stmmac_priv *priv) * changed). * In that case the driver disable own timers. */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (priv->eee_active) { netdev_dbg(priv->dev, "disable EEE\n"); del_timer_sync(&priv->eee_ctrl_timer); - priv->hw->mac->set_eee_timer(priv->hw, 0, - tx_lpi_timer); + stmmac_set_eee_timer(priv, priv->hw, 0, + tx_lpi_timer); } priv->eee_active = 0; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); goto out; } /* Activate the EEE and start timers */ - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (!priv->eee_active) { priv->eee_active = 1; timer_setup(&priv->eee_ctrl_timer, @@ -426,15 +427,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); - priv->hw->mac->set_eee_timer(priv->hw, - STMMAC_DEFAULT_LIT_LS, - tx_lpi_timer); + stmmac_set_eee_timer(priv, priv->hw, + STMMAC_DEFAULT_LIT_LS, tx_lpi_timer); } /* Set HW EEE according to the speed */ - priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link); + stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link); ret = true; - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); } @@ -464,9 +464,9 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, return; /* check tx tstamp status */ - if (priv->hw->desc->get_tx_timestamp_status(p)) { + if (stmmac_get_tx_timestamp_status(priv, p)) { /* get the valid tstamp */ - ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); + stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); @@ -502,8 +502,8 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, desc = np; /* Check if timestamp is available */ - if (priv->hw->desc->get_rx_timestamp_status(p, np, priv->adv_ts)) { - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { + stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); @@ -707,20 +707,24 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); + stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); else { value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); + stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); /* program Sub Second Increment reg */ - sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ptpaddr, priv->plat->clk_ptp_rate, - priv->plat->has_gmac4); + stmmac_config_sub_second_increment(priv, + priv->ptpaddr, priv->plat->clk_ptp_rate, + priv->plat->has_gmac4, &sec_inc); temp = div_u64(1000000000ULL, sec_inc); + /* Store sub second increment and flags for later use */ + 
priv->sub_second_inc = sec_inc; + priv->systime_flags = value; + /* calculate default added value: * formula is : * addend = (2^32)/freq_div_ratio; @@ -728,15 +732,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) */ temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); - priv->hw->ptp->config_addend(priv->ptpaddr, - priv->default_addend); + stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); /* initialize system time */ ktime_get_real_ts64(&now); /* lower 32 bits of tv_sec are safe until y2106 */ - priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, - now.tv_nsec); + stmmac_init_systime(priv, priv->ptpaddr, + (u32)now.tv_sec, now.tv_nsec); } return copy_to_user(ifr->ifr_data, &config, @@ -770,7 +773,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) netdev_info(priv->dev, "IEEE 1588-2008 Advanced Timestamp supported\n"); - priv->hw->ptp = &stmmac_ptp; priv->hwts_tx_en = 0; priv->hwts_rx_en = 0; @@ -795,8 +797,8 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) { u32 tx_cnt = priv->plat->tx_queues_to_use; - priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, + priv->pause, tx_cnt); } /** @@ -812,13 +814,12 @@ static void stmmac_adjust_link(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - unsigned long flags; bool new_state = false; if (!phydev) return; - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); if (phydev->link) { u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); @@ -877,7 +878,7 @@ static void stmmac_adjust_link(struct net_device *dev) if (new_state && netif_msg_link(priv)) phy_print_status(phydev); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (phydev->is_pseudo_fixed_link) /* Stop PHY layer to call the hook to adjust the link in case @@ -1008,7 +1009,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv) head_rx = (void *)rx_q->dma_rx; /* Display RX ring */ - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); + stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true); } } @@ -1029,7 +1030,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv) else head_tx = (void *)tx_q->dma_tx; - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); + stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false); } } @@ -1073,13 +1074,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) /* Clear the RX descriptors */ for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, - priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); else - priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], - priv->use_riwt, priv->mode, - (i == DMA_RX_SIZE - 1)); + stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], + priv->use_riwt, priv->mode, + (i == DMA_RX_SIZE - 1)); } /** @@ -1097,13 +1098,11 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) /* Clear the TX descriptors */ for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, (i == DMA_TX_SIZE - 1)); else - 
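The addend arithmetic in the hwtstamp hunk above (temp = 1e9 / sec_inc, then default_addend = (temp << 32) / clk_ptp_rate) programs a 32-bit fractional accumulator: each cycle of the PTP clock adds the addend to a 32-bit register, and every overflow advances the hardware clock by sec_inc nanoseconds, which is also why the patch now stashes sub_second_inc and systime_flags for later reuse. A worked example with assumed numbers (62.5 MHz PTP clock, 20 ns increment; not taken from any particular platform):

/*
 * Worked addend computation. clk_ptp_rate and sec_inc are assumed
 * example values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 62500000;		/* Hz (assumed) */
	uint64_t sec_inc = 20;				/* ns per overflow */
	uint64_t temp = 1000000000ULL / sec_inc;	/* 5e7, as in the patch */
	uint64_t addend = (temp << 32) / clk_ptp_rate;

	/*
	 * 0xcccccccc = 0.8 * 2^32: the accumulator overflows 0.8 times
	 * per 62.5 MHz cycle, i.e. 5e7 times/s, and 5e7 * 20 ns is
	 * exactly one second of PTP time per second of wall time.
	 */
	printf("addend = 0x%llx\n", (unsigned long long)addend);
	return 0;
}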
priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); } /** @@ -1159,14 +1158,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return -EINVAL; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) - p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); - else - p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); + stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]); - if ((priv->hw->mode->init_desc3) && - (priv->dma_buf_sz == BUF_SIZE_16KiB)) - priv->hw->mode->init_desc3(p); + if (priv->dma_buf_sz == BUF_SIZE_16KiB) + stmmac_init_desc3(priv, p); return 0; } @@ -1232,13 +1227,14 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) { struct stmmac_priv *priv = netdev_priv(dev); u32 rx_count = priv->plat->rx_queues_to_use; - unsigned int bfsize = 0; int ret = -ENOMEM; + int bfsize = 0; int queue; int i; - if (priv->hw->mode->set_16kib_bfsize) - bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); + bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); + if (bfsize < 0) + bfsize = 0; if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); @@ -1282,13 +1278,11 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) - priv->hw->mode->init(rx_q->dma_erx, - rx_q->dma_rx_phy, - DMA_RX_SIZE, 1); + stmmac_mode_init(priv, rx_q->dma_erx, + rx_q->dma_rx_phy, DMA_RX_SIZE, 1); else - priv->hw->mode->init(rx_q->dma_rx, - rx_q->dma_rx_phy, - DMA_RX_SIZE, 0); + stmmac_mode_init(priv, rx_q->dma_rx, + rx_q->dma_rx_phy, DMA_RX_SIZE, 0); } } @@ -1335,13 +1329,11 @@ static int init_dma_tx_desc_rings(struct net_device *dev) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) - priv->hw->mode->init(tx_q->dma_etx, - tx_q->dma_tx_phy, - DMA_TX_SIZE, 1); + stmmac_mode_init(priv, tx_q->dma_etx, + tx_q->dma_tx_phy, DMA_TX_SIZE, 1); else - priv->hw->mode->init(tx_q->dma_tx, - tx_q->dma_tx_phy, - DMA_TX_SIZE, 0); + stmmac_mode_init(priv, tx_q->dma_tx, + tx_q->dma_tx_phy, DMA_TX_SIZE, 0); } for (i = 0; i < DMA_TX_SIZE; i++) { @@ -1351,14 +1343,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev) else p = tx_q->dma_tx + i; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - p->des0 = 0; - p->des1 = 0; - p->des2 = 0; - p->des3 = 0; - } else { - p->des2 = 0; - } + stmmac_clear_desc(priv, p); tx_q->tx_skbuff_dma[i].buf = 0; tx_q->tx_skbuff_dma[i].map_as_page = false; @@ -1664,7 +1649,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) for (queue = 0; queue < rx_queues_count; queue++) { mode = priv->plat->rx_queues_cfg[queue].mode_to_use; - priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); + stmmac_rx_queue_enable(priv, priv->hw, mode, queue); } } @@ -1678,7 +1663,7 @@ static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); - priv->hw->dma->start_rx(priv->ioaddr, chan); + stmmac_start_rx(priv, priv->ioaddr, chan); } /** @@ -1691,7 +1676,7 @@ static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); - priv->hw->dma->start_tx(priv->ioaddr, 
chan); + stmmac_start_tx(priv, priv->ioaddr, chan); } /** @@ -1704,7 +1689,7 @@ static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); - priv->hw->dma->stop_rx(priv->ioaddr, chan); + stmmac_stop_rx(priv, priv->ioaddr, chan); } /** @@ -1717,7 +1702,7 @@ static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) { netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); - priv->hw->dma->stop_tx(priv->ioaddr, chan); + stmmac_stop_tx(priv, priv->ioaddr, chan); } /** @@ -1804,23 +1789,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) } /* configure all channels */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - for (chan = 0; chan < rx_channels_count; chan++) { - qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + for (chan = 0; chan < rx_channels_count; chan++) { + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz, qmode); - } + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, + rxfifosz, qmode); + } - for (chan = 0; chan < tx_channels_count; chan++) { - qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; + for (chan = 0; chan < tx_channels_count; chan++) { + qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz, qmode); - } - } else { - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, - rxfifosz); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, + txfifosz, qmode); } } @@ -1851,9 +1831,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) else p = tx_q->dma_tx + entry; - status = priv->hw->desc->tx_status(&priv->dev->stats, - &priv->xstats, p, - priv->ioaddr); + status = stmmac_tx_status(priv, &priv->dev->stats, + &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ if (unlikely(status & tx_dma_own)) break; @@ -1891,8 +1870,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff_dma[entry].map_as_page = false; } - if (priv->hw->mode->clean_desc3) - priv->hw->mode->clean_desc3(tx_q, p); + stmmac_clean_desc3(priv, tx_q, p); tx_q->tx_skbuff_dma[entry].last_segment = false; tx_q->tx_skbuff_dma[entry].is_jumbo = false; @@ -1904,7 +1882,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff[entry] = NULL; } - priv->hw->desc->release_tx_desc(p, priv->mode); + stmmac_release_tx_desc(priv, p, priv->mode); entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); } @@ -1929,16 +1907,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) netif_tx_unlock(priv->dev); } -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) -{ - priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); -} - -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) -{ - priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); -} - /** * stmmac_tx_err - to manage the tx error * @priv: driver private structure @@ -1957,13 +1925,11 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) dma_free_tx_skbufs(priv, chan); for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) - priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, + priv->mode, (i == DMA_TX_SIZE - 1)); else 
- priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], - priv->mode, - (i == DMA_TX_SIZE - 1)); + stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], + priv->mode, (i == DMA_TX_SIZE - 1)); tx_q->dirty_tx = 0; tx_q->cur_tx = 0; tx_q->mss = 0; @@ -2003,31 +1969,22 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, rxfifosz /= rx_channels_count; txfifosz /= tx_channels_count; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz, rxqmode); - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz, txqmode); - } else { - priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, - rxfifosz); - } + stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); + stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); } static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) { - bool ret = false; - - /* Safety features are only available in cores >= 5.10 */ - if (priv->synopsys_id < DWMAC_CORE_5_10) - return ret; - if (priv->hw->mac->safety_feat_irq_status) - ret = priv->hw->mac->safety_feat_irq_status(priv->dev, - priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + int ret; - if (ret) + ret = stmmac_safety_feat_irq_status(priv, priv->dev, + priv->ioaddr, priv->dma_cap.asp, &priv->sstats); + if (ret && (ret != -EINVAL)) { stmmac_global_err(priv); - return ret; + return true; + } + + return false; } /** @@ -2045,7 +2002,11 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) tx_channel_count : rx_channel_count; u32 chan; bool poll_scheduled = false; - int status[channels_to_check]; + int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; + + /* Make sure we never check beyond our status buffer. */ + if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) + channels_to_check = ARRAY_SIZE(status); /* Each DMA channel can be used for rx and tx simultaneously, yet * napi_struct is embedded in struct stmmac_rx_queue rather than in a @@ -2054,16 +2015,15 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) * all tx queues rather than just a single tx queue. 
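stmmac_dma_interrupt() above also drops its variable-length array: int status[channels_to_check] becomes a fixed array sized for the worse of MTL_MAX_RX_QUEUES/MTL_MAX_TX_QUEUES, with WARN_ON_ONCE clamping the runtime count, in line with the kernel-wide push to remove VLAs from the stack. The pattern in miniature, with invented DEMO_* bounds:

/*
 * VLA-removal pattern: compile-time worst-case array plus a runtime
 * clamp. DEMO_* names and sizes are illustrative.
 */
#include <stdio.h>

#define DEMO_MAX_RX_QUEUES	8
#define DEMO_MAX_TX_QUEUES	8
#define DEMO_MAX(a, b)		((a) > (b) ? (a) : (b))

static void demo_dma_interrupt(unsigned int channels_to_check)
{
	int status[DEMO_MAX(DEMO_MAX_RX_QUEUES, DEMO_MAX_TX_QUEUES)];
	unsigned int chan;

	/* Never index past the compile-time bound. */
	if (channels_to_check > sizeof(status) / sizeof(status[0]))
		channels_to_check = sizeof(status) / sizeof(status[0]);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = 0;	/* stand-in for per-channel IRQ status */

	printf("checked %u channels\n", channels_to_check);
}

int main(void)
{
	demo_dma_interrupt(12);	/* clamped to 8 */
	return 0;
}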
*/ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr, - &priv->xstats, - chan); + status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); for (chan = 0; chan < rx_channel_count; chan++) { if (likely(status[chan] & handle_rx)) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, chan); + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); __napi_schedule(&rx_q->napi); poll_scheduled = true; } @@ -2084,7 +2044,8 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) &priv->rx_queue[0]; if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, chan); + stmmac_disable_dma_irq(priv, + priv->ioaddr, chan); __napi_schedule(&rx_q->napi); } break; @@ -2126,14 +2087,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; - priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; - } else { - priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; - priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; - } - dwmac_mmc_intr_all_mask(priv->mmcaddr); if (priv->dma_cap.rmon) { @@ -2144,32 +2097,6 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) } /** - * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors - * @priv: driver private structure - * Description: select the Enhanced/Alternate or Normal descriptors. - * In case of Enhanced/Alternate, it checks if the extended descriptors are - * supported by the HW capability register. - */ -static void stmmac_selec_desc_mode(struct stmmac_priv *priv) -{ - if (priv->plat->enh_desc) { - dev_info(priv->device, "Enhanced/Alternate descriptors\n"); - - /* GMAC older than 3.50 has no extended descriptors */ - if (priv->synopsys_id >= DWMAC_CORE_3_50) { - dev_info(priv->device, "Enabled extended descriptors\n"); - priv->extend_desc = 1; - } else - dev_warn(priv->device, "Extended descriptors not supported\n"); - - priv->hw->desc = &enh_desc_ops; - } else { - dev_info(priv->device, "Normal descriptors\n"); - priv->hw->desc = &ndesc_ops; - } -} - -/** * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 
* @priv: driver private structure * Description: @@ -2180,15 +2107,7 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) */ static int stmmac_get_hw_features(struct stmmac_priv *priv) { - u32 ret = 0; - - if (priv->hw->dma->get_hw_feature) { - priv->hw->dma->get_hw_feature(priv->ioaddr, - &priv->dma_cap); - ret = 1; - } - - return ret; + return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; } /** @@ -2201,8 +2120,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv) static void stmmac_check_ether_addr(struct stmmac_priv *priv) { if (!is_valid_ether_addr(priv->dev->dev_addr)) { - priv->hw->mac->get_umac_addr(priv->hw, - priv->dev->dev_addr, 0); + stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); if (!is_valid_ether_addr(priv->dev->dev_addr)) eth_hw_addr_random(priv->dev); netdev_info(priv->dev, "device MAC address %pM\n", @@ -2222,10 +2140,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; - u32 dummy_dma_rx_phy = 0; - u32 dummy_dma_tx_phy = 0; u32 chan = 0; int atds = 0; int ret = 0; @@ -2238,59 +2155,47 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - ret = priv->hw->dma->reset(priv->ioaddr); + ret = stmmac_reset(priv, priv->ioaddr); if (ret) { dev_err(priv->device, "Failed to reset the dma\n"); return ret; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - /* DMA Configuration */ - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, - dummy_dma_tx_phy, dummy_dma_rx_phy, atds); + /* DMA RX Channel Configuration */ + for (chan = 0; chan < rx_channels_count; chan++) { + rx_q = &priv->rx_queue[chan]; - /* DMA RX Channel Configuration */ - for (chan = 0; chan < rx_channels_count; chan++) { - rx_q = &priv->rx_queue[chan]; + stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + rx_q->dma_rx_phy, chan); - priv->hw->dma->init_rx_chan(priv->ioaddr, - priv->plat->dma_cfg, - rx_q->dma_rx_phy, chan); + rx_q->rx_tail_addr = rx_q->dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, chan); + } - rx_q->rx_tail_addr = rx_q->dma_rx_phy + - (DMA_RX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, - rx_q->rx_tail_addr, - chan); - } + /* DMA TX Channel Configuration */ + for (chan = 0; chan < tx_channels_count; chan++) { + tx_q = &priv->tx_queue[chan]; - /* DMA TX Channel Configuration */ - for (chan = 0; chan < tx_channels_count; chan++) { - tx_q = &priv->tx_queue[chan]; + stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, + tx_q->dma_tx_phy, chan); - priv->hw->dma->init_chan(priv->ioaddr, - priv->plat->dma_cfg, - chan); + tx_q->tx_tail_addr = tx_q->dma_tx_phy + + (DMA_TX_SIZE * sizeof(struct dma_desc)); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, + tx_q->tx_tail_addr, chan); + } - priv->hw->dma->init_tx_chan(priv->ioaddr, - priv->plat->dma_cfg, - tx_q->dma_tx_phy, chan); + /* DMA CSR Channel configuration */ + for (chan = 0; chan < dma_csr_ch; chan++) + stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); - tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, - tx_q->tx_tail_addr, - chan); - } - } else { - rx_q = 
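stmmac_get_hw_features() above shrinks to a single comparison: the wrapper returns 0 when the per-core get_hw_feature hook ran, and an error (such as -EINVAL for cores without the hook) otherwise, so "== 0" reproduces the old 0/1 contract. Sketched with invented demo_* names:

/*
 * Mapping a 0/-errno wrapper result back onto a boolean contract.
 */
#include <errno.h>
#include <stdio.h>

static int demo_get_hw_feature(int core_has_hook)
{
	return core_has_hook ? 0 : -EINVAL;	/* stand-in for the wrapper */
}

static int demo_get_hw_features(int core_has_hook)
{
	return demo_get_hw_feature(core_has_hook) == 0;
}

int main(void)
{
	printf("with hook: %d, without: %d\n",
	       demo_get_hw_features(1), demo_get_hw_features(0));
	return 0;
}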
&priv->rx_queue[chan]; - tx_q = &priv->tx_queue[chan]; - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, - tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); - } + /* DMA Configuration */ + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); - if (priv->plat->axi && priv->hw->dma->axi) - priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); + if (priv->plat->axi) + stmmac_axi(priv, priv->ioaddr, priv->plat->axi); return ret; } @@ -2336,18 +2241,14 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv) u32 chan; /* set TX ring length */ - if (priv->hw->dma->set_tx_ring_len) { - for (chan = 0; chan < tx_channels_count; chan++) - priv->hw->dma->set_tx_ring_len(priv->ioaddr, - (DMA_TX_SIZE - 1), chan); - } + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_set_tx_ring_len(priv, priv->ioaddr, + (DMA_TX_SIZE - 1), chan); /* set RX ring length */ - if (priv->hw->dma->set_rx_ring_len) { - for (chan = 0; chan < rx_channels_count; chan++) - priv->hw->dma->set_rx_ring_len(priv->ioaddr, - (DMA_RX_SIZE - 1), chan); - } + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_set_rx_ring_len(priv, priv->ioaddr, + (DMA_RX_SIZE - 1), chan); } /** @@ -2363,7 +2264,7 @@ static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) for (queue = 0; queue < tx_queues_count; queue++) { weight = priv->plat->tx_queues_cfg[queue].weight; - priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); + stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); } } @@ -2384,7 +2285,7 @@ static void stmmac_configure_cbs(struct stmmac_priv *priv) if (mode_to_use == MTL_QUEUE_DCB) continue; - priv->hw->mac->config_cbs(priv->hw, + stmmac_config_cbs(priv, priv->hw, priv->plat->tx_queues_cfg[queue].send_slope, priv->plat->tx_queues_cfg[queue].idle_slope, priv->plat->tx_queues_cfg[queue].high_credit, @@ -2406,7 +2307,7 @@ static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) for (queue = 0; queue < rx_queues_count; queue++) { chan = priv->plat->rx_queues_cfg[queue].chan; - priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); + stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); } } @@ -2426,7 +2327,7 @@ static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) continue; prio = priv->plat->rx_queues_cfg[queue].prio; - priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); + stmmac_rx_queue_prio(priv, priv->hw, prio, queue); } } @@ -2446,7 +2347,7 @@ static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) continue; prio = priv->plat->tx_queues_cfg[queue].prio; - priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); + stmmac_tx_queue_prio(priv, priv->hw, prio, queue); } } @@ -2467,7 +2368,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) continue; packet = priv->plat->rx_queues_cfg[queue].pkt_route; - priv->hw->mac->rx_queue_routing(priv->hw, packet, queue); + stmmac_rx_queue_routing(priv, priv->hw, packet, queue); } } @@ -2481,50 +2382,47 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv) u32 rx_queues_count = priv->plat->rx_queues_to_use; u32 tx_queues_count = priv->plat->tx_queues_to_use; - if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) + if (tx_queues_count > 1) stmmac_set_tx_queue_weight(priv); /* Configure MTL RX algorithms */ - if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) - priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, - priv->plat->rx_sched_algorithm); + if (rx_queues_count > 1) + stmmac_prog_mtl_rx_algorithms(priv, priv->hw, + 
priv->plat->rx_sched_algorithm); /* Configure MTL TX algorithms */ - if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) - priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, - priv->plat->tx_sched_algorithm); + if (tx_queues_count > 1) + stmmac_prog_mtl_tx_algorithms(priv, priv->hw, + priv->plat->tx_sched_algorithm); /* Configure CBS in AVB TX queues */ - if (tx_queues_count > 1 && priv->hw->mac->config_cbs) + if (tx_queues_count > 1) stmmac_configure_cbs(priv); /* Map RX MTL to DMA channels */ - if (priv->hw->mac->map_mtl_to_dma) - stmmac_rx_queue_dma_chan_map(priv); + stmmac_rx_queue_dma_chan_map(priv); /* Enable MAC RX Queues */ - if (priv->hw->mac->rx_queue_enable) - stmmac_mac_enable_rx_queues(priv); + stmmac_mac_enable_rx_queues(priv); /* Set RX priorities */ - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) + if (rx_queues_count > 1) stmmac_mac_config_rx_queues_prio(priv); /* Set TX priorities */ - if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) + if (tx_queues_count > 1) stmmac_mac_config_tx_queues_prio(priv); /* Set RX routing */ - if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) + if (rx_queues_count > 1) stmmac_mac_config_rx_queues_routing(priv); } static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) { - if (priv->hw->mac->safety_feat_config && priv->dma_cap.asp) { + if (priv->dma_cap.asp) { netdev_info(priv->dev, "Enabling Safety Features\n"); - priv->hw->mac->safety_feat_config(priv->ioaddr, - priv->dma_cap.asp); + stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); } else { netdev_info(priv->dev, "No Safety Features support found\n"); } @@ -2559,7 +2457,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Copy the MAC addr into the HW */ - priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); + stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { @@ -2575,17 +2473,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Initialize the MAC Core */ - priv->hw->mac->core_init(priv->hw, dev); + stmmac_core_init(priv, priv->hw, dev); /* Initialize MTL*/ - if (priv->synopsys_id >= DWMAC_CORE_4_00) - stmmac_mtl_configuration(priv); + stmmac_mtl_configuration(priv); /* Initialize Safety Features */ - if (priv->synopsys_id >= DWMAC_CORE_5_10) - stmmac_safety_feat_configuration(priv); + stmmac_safety_feat_configuration(priv); - ret = priv->hw->mac->rx_ipc(priv->hw); + ret = stmmac_rx_ipc(priv, priv->hw); if (!ret) { netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); priv->plat->rx_coe = STMMAC_RX_COE_NONE; @@ -2593,7 +2489,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) } /* Enable the MAC Rx/Tx */ - priv->hw->mac->set_mac(priv->ioaddr, true); + stmmac_mac_set(priv, priv->ioaddr, true); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -2623,13 +2519,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; - if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { - priv->rx_riwt = MAX_DMA_RIWT; - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); + if (priv->use_riwt) { + ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); + if (!ret) + priv->rx_riwt = MAX_DMA_RIWT; } - if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) - priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); + if (priv->hw->pcs) + stmmac_pcs_ctrl_ane(priv, 
priv->hw, 1, priv->hw->ps, 0); /* set TX and RX rings length */ stmmac_set_rings_length(priv); @@ -2637,7 +2534,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) /* Enable TSO */ if (priv->tso) { for (chan = 0; chan < tx_cnt; chan++) - priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); + stmmac_enable_tso(priv, priv->ioaddr, 1, chan); } return 0; @@ -2808,7 +2705,7 @@ static int stmmac_release(struct net_device *dev) free_dma_desc_resources(priv); /* Disable the MAC Rx/Tx */ - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(dev); @@ -2851,10 +2748,10 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len; - priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, - 0, 1, - (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), - 0, 0); + stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, + 0, 1, + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), + 0, 0); tmp_len -= TSO_MAX_BUFF_SIZE; } @@ -2926,7 +2823,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) /* set new MSS value if needed */ if (mss != tx_q->mss) { mss_desc = tx_q->dma_tx + tx_q->cur_tx; - priv->hw->desc->set_mss(mss_desc, mss); + stmmac_set_mss(priv, mss_desc, mss); tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); @@ -3012,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) STMMAC_COAL_TIMER(priv->tx_coal_timer)); } else { priv->tx_count_frames = 0; - priv->hw->desc->set_tx_ic(desc); + stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; } @@ -3022,11 +2919,11 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + stmmac_enable_tx_timestamp(priv, first); } /* Complete the first descriptor before granting the DMA */ - priv->hw->desc->prepare_tso_tx_desc(first, 1, + stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, pay_len, 1, tx_q->tx_skbuff_dma[first_entry].last_segment, @@ -3040,7 +2937,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) * sure that MSS's own bit is the last thing written. 
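The stmmac_core_init()/stmmac_set_tx_ring_len()-style wrappers used throughout these hunks are the visible half of the new HW interface layer: the old `if (priv->hw->dma->callback)` guards move into one helper macro, so every call site shrinks to a single line. A minimal sketch of that indirection, assuming a hwif.h-style macro; the exact names and expansion upstream may differ:

/*
 * Invoke an optional backend callback, returning -EINVAL when the
 * current core does not provide it, so callers no longer open-code
 * NULL checks against priv->hw->dma / priv->hw->mac / priv->hw->desc.
 */
#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
({ \
	int __result = -EINVAL; \
	if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
		(__priv)->hw->__module->__cname((__arg0), ##__args); \
		__result = 0; \
	} \
	__result; \
})

#define stmmac_set_tx_ring_len(__priv, __args...) \
	stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args)

Callers that care, such as the stmmac_rx_watchdog() site above which only records rx_riwt on success, check the returned status; void-style users simply ignore it.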
*/ dma_wmb(); - priv->hw->desc->set_tx_owner(mss_desc); + stmmac_set_tx_owner(priv, mss_desc); } /* The own bit must be the latest setting done when prepare the @@ -3054,8 +2951,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, tx_q->cur_tx, first, nfrags); - priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, - 0); + stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); pr_info(">>> frame to be transmitted: "); print_pkt(skb->data, skb_headlen(skb)); @@ -3063,8 +2959,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, - queue); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; @@ -3136,12 +3031,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) - is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); + is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); - if (unlikely(is_jumbo) && likely(priv->synopsys_id < - DWMAC_CORE_4_00)) { - entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); - if (unlikely(entry < 0)) + if (unlikely(is_jumbo)) { + entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); + if (unlikely(entry < 0) && (entry != -EINVAL)) goto dma_map_err; } @@ -3164,19 +3058,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; /* should reuse desc w/o issues */ tx_q->tx_skbuff_dma[entry].buf = des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - desc->des0 = cpu_to_le32(des); - else - desc->des2 = cpu_to_le32(des); + + stmmac_set_desc_addr(priv, desc, des); tx_q->tx_skbuff_dma[entry].map_as_page = true; tx_q->tx_skbuff_dma[entry].len = len; tx_q->tx_skbuff_dma[entry].last_segment = last_segment; /* Prepare the descriptor and set the own bit too */ - priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode, 1, last_segment, - skb->len); + stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, + priv->mode, 1, last_segment, skb->len); } /* Only the last descriptor gets to point to the skb. */ @@ -3203,7 +3094,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) else tx_head = (void *)tx_q->dma_tx; - priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); + stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); netdev_dbg(priv->dev, ">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); @@ -3223,13 +3114,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) * element in case of no SG. 
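stmmac_set_desc_addr() absorbs the des0-versus-des2 choice that the deleted DWMAC_CORE_4_00 checks made inline at every map site. A sketch of the two backend shapes it dispatches to, mirroring the removed branches (function names illustrative):

/* GMAC4 and newer: the buffer address is programmed into des0 */
static void dwmac4_set_addr(struct dma_desc *p, unsigned int addr)
{
	p->des0 = cpu_to_le32(addr);
	p->des1 = 0;
}

/* Pre-4.00 cores: the buffer address lives in des2 */
static void ndesc_set_addr(struct dma_desc *p, unsigned int addr)
{
	p->des2 = cpu_to_le32(addr);
}

With the split hidden behind the callback, stmmac_xmit() and stmmac_tso_xmit() no longer need to know which core family they are driving.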
*/ priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + if (likely(priv->tx_coal_frames > priv->tx_count_frames) && + !priv->tx_timer_armed) { mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); + priv->tx_timer_armed = true; } else { priv->tx_count_frames = 0; - priv->hw->desc->set_tx_ic(desc); + stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + priv->tx_timer_armed = false; } skb_tx_timestamp(skb); @@ -3247,10 +3141,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; tx_q->tx_skbuff_dma[first_entry].buf = des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - first->des0 = cpu_to_le32(des); - else - first->des2 = cpu_to_le32(des); + + stmmac_set_desc_addr(priv, first, des); tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; @@ -3259,13 +3151,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hwts_tx_en)) { /* declare that device is doing timestamping */ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + stmmac_enable_tx_timestamp(priv, first); } /* Prepare the first descriptor setting the OWN bit too */ - priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, - csum_insertion, priv->mode, 1, - last_segment, skb->len); + stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, + csum_insertion, priv->mode, 1, last_segment, + skb->len); /* The own bit must be the latest setting done when prepare the * descriptor and then barrier is needed to make sure that @@ -3276,11 +3168,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - if (priv->synopsys_id < DWMAC_CORE_4_00) - priv->hw->dma->enable_dma_transmission(priv->ioaddr); - else - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, - queue); + stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); return NETDEV_TX_OK; @@ -3364,14 +3253,8 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) break; } - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { - p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); - p->des1 = 0; - } else { - p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); - } - if (priv->hw->mode->refill_desc3) - priv->hw->mode->refill_desc3(rx_q, p); + stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); + stmmac_refill_desc3(priv, rx_q, p); if (rx_q->rx_zeroc_thresh > 0) rx_q->rx_zeroc_thresh--; @@ -3381,10 +3264,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) } dma_wmb(); - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); - else - priv->hw->desc->set_rx_owner(p); + stmmac_set_rx_owner(priv, p, priv->use_riwt); dma_wmb(); @@ -3418,7 +3298,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else rx_head = (void *)rx_q->dma_rx; - priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); + stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); } while (count < limit) { int status; @@ -3431,8 +3311,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) p = rx_q->dma_rx + entry; /* read the status of the incoming frame */ - status = priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p); + status = stmmac_rx_status(priv, 
&priv->dev->stats, + &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ if (unlikely(status & dma_own)) break; @@ -3449,11 +3329,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) prefetch(np); - if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) - priv->hw->desc->rx_extended_status(&priv->dev->stats, - &priv->xstats, - rx_q->dma_erx + - entry); + if (priv->extend_desc) + stmmac_rx_extended_status(priv, &priv->dev->stats, + &priv->xstats, rx_q->dma_erx + entry); if (unlikely(status == discard_frame)) { priv->dev->stats.rx_errors++; if (priv->hwts_rx_en && !priv->extend_desc) { @@ -3474,12 +3352,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) int frame_len; unsigned int des; - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) - des = le32_to_cpu(p->des0); - else - des = le32_to_cpu(p->des2); - - frame_len = priv->hw->desc->get_rx_frame_len(p, coe); + stmmac_get_desc_addr(priv, p, &des); + frame_len = stmmac_get_rx_frame_len(priv, p, coe); /* If frame length is greater than skb buffer size * (preallocated during init) then the packet is @@ -3621,7 +3495,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget) work_done = stmmac_rx(priv, budget, rx_q->queue_index); if (work_done < budget) { napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv, chan); + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); } return work_done; } @@ -3654,7 +3528,7 @@ static void stmmac_set_rx_mode(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - priv->hw->mac->set_filter(priv->hw, dev); + stmmac_set_filter(priv, priv->hw, dev); } /** @@ -3727,7 +3601,7 @@ static int stmmac_set_features(struct net_device *netdev, /* No check needed because rx_coe has been set before and it will be * fixed in case of issue. 
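In the refill path above, a single stmmac_set_rx_owner(priv, p, priv->use_riwt) replaces the old split between init_rx_desc() on GMAC4 and set_rx_owner() on older cores. A sketch of what the GMAC4 side of that callback plausibly does, using the RDES3 bit names from dwmac4_descs.h; treat the exact bit handling as illustrative:

/* Hand the descriptor back to the DMA; optionally suppress the RX
 * interrupt-on-completion bit when the RX watchdog (RIWT) paces IRQs. */
static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
	u32 flags = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;

	if (!disable_rx_ic)
		flags |= RDES3_INT_ON_COMPLETION_EN;

	p->des3 |= cpu_to_le32(flags);
}

Passing priv->use_riwt through the callback keeps the IRQ-pacing policy in one place instead of duplicating it per core family.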
*/ - priv->hw->mac->rx_ipc(priv->hw); + stmmac_rx_ipc(priv, priv->hw); return 0; } @@ -3771,8 +3645,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) /* To handle GMAC own interrupts */ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { - int status = priv->hw->mac->host_irq_status(priv->hw, - &priv->xstats); + int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); + int mtl_status; if (unlikely(status)) { /* For LPI we need to save the tx status */ @@ -3782,21 +3656,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->tx_path_in_lpi_mode = false; } - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - for (queue = 0; queue < queues_count; queue++) { - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[queue]; - - status |= - priv->hw->mac->host_mtl_irq_status(priv->hw, - queue); + for (queue = 0; queue < queues_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; - if (status & CORE_IRQ_MTL_RX_OVERFLOW && - priv->hw->dma->set_rx_tail_ptr) - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, - rx_q->rx_tail_addr, + mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, queue); - } + if (mtl_status != -EINVAL) + status |= mtl_status; + + if (status & CORE_IRQ_MTL_RX_OVERFLOW) + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, + rx_q->rx_tail_addr, + queue); } /* PCS link status */ @@ -3860,6 +3731,58 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return ret; } +static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct stmmac_priv *priv = cb_priv; + int ret = -EOPNOTSUPP; + + stmmac_disable_all_queues(priv); + + switch (type) { + case TC_SETUP_CLSU32: + if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) + ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); + break; + default: + break; + } + + stmmac_enable_all_queues(priv); + return ret; +} + +static int stmmac_setup_tc_block(struct stmmac_priv *priv, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, + priv, priv); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, + void *type_data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + switch (type) { + case TC_SETUP_BLOCK: + return stmmac_setup_tc_block(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + static int stmmac_set_mac_address(struct net_device *ndev, void *addr) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -3869,7 +3792,7 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) if (ret) return ret; - priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); + stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); return ret; } @@ -4098,6 +4021,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, .ndo_do_ioctl = stmmac_ioctl, + .ndo_setup_tc = stmmac_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = stmmac_poll_controller, #endif @@ -4145,49 +4069,17 @@ static void stmmac_service_task(struct work_struct *work) */ static int stmmac_hw_init(struct stmmac_priv *priv) { - struct mac_device_info *mac; - - /* Identify the MAC HW device */ - if 
(priv->plat->setup) { - mac = priv->plat->setup(priv); - } else if (priv->plat->has_gmac) { - priv->dev->priv_flags |= IFF_UNICAST_FLT; - mac = dwmac1000_setup(priv->ioaddr, - priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries, - &priv->synopsys_id); - } else if (priv->plat->has_gmac4) { - priv->dev->priv_flags |= IFF_UNICAST_FLT; - mac = dwmac4_setup(priv->ioaddr, - priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries, - &priv->synopsys_id); - } else { - mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); - } - if (!mac) - return -ENOMEM; - - priv->hw = mac; + int ret; /* dwmac-sun8i only work in chain mode */ if (priv->plat->has_sun8i) chain_mode = 1; + priv->chain_mode = chain_mode; - /* To use the chained or ring mode */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) { - priv->hw->mode = &dwmac4_ring_mode_ops; - } else { - if (chain_mode) { - priv->hw->mode = &chain_mode_ops; - dev_info(priv->device, "Chain mode enabled\n"); - priv->mode = STMMAC_CHAIN_MODE; - } else { - priv->hw->mode = &ring_mode_ops; - dev_info(priv->device, "Ring mode enabled\n"); - priv->mode = STMMAC_RING_MODE; - } - } + /* Initialize HW Interface */ + ret = stmmac_hwif_init(priv); + if (ret) + return ret; /* Get the HW capability (new GMAC newer than 3.50a) */ priv->hw_cap_support = stmmac_get_hw_features(priv); @@ -4221,12 +4113,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv) dev_info(priv->device, "No HW DMA feature register supported\n"); } - /* To use alternate (extended), normal or GMAC4 descriptor structures */ - if (priv->synopsys_id >= DWMAC_CORE_4_00) - priv->hw->desc = &dwmac4_desc_ops; - else - stmmac_selec_desc_mode(priv); - if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; dev_info(priv->device, "RX Checksum Offload Engine supported\n"); @@ -4335,6 +4221,11 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + ret = stmmac_tc_init(priv, priv); + if (!ret) { + ndev->hw_features |= NETIF_F_HW_TC; + } + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; priv->tso = true; @@ -4386,7 +4277,7 @@ int stmmac_dvr_probe(struct device *device, (8 * priv->plat->rx_queues_to_use)); } - spin_lock_init(&priv->lock); + mutex_init(&priv->lock); /* If a specific clk_csr value is passed from the platform * this means that the CSR Clock Range selection cannot be @@ -4458,7 +4349,7 @@ int stmmac_dvr_remove(struct device *dev) stmmac_stop_all_dma(priv); - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); if (priv->plat->stmmac_rst) @@ -4470,6 +4361,7 @@ int stmmac_dvr_remove(struct device *dev) priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); + mutex_destroy(&priv->lock); free_netdev(ndev); return 0; @@ -4487,7 +4379,6 @@ int stmmac_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!ndev || !netif_running(ndev)) return 0; @@ -4495,7 +4386,7 @@ int stmmac_suspend(struct device *dev) if (ndev->phydev) phy_stop(ndev->phydev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); netif_device_detach(ndev); stmmac_stop_all_queues(priv); @@ -4507,16 +4398,16 @@ int stmmac_suspend(struct device *dev) /* Enable Power down mode by programming the PMT regs */ if 
(device_may_wakeup(priv->device)) { - priv->hw->mac->pmt(priv->hw, priv->wolopts); + stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { - priv->hw->mac->set_mac(priv->ioaddr, false); + stmmac_mac_set(priv, priv->ioaddr, false); pinctrl_pm_select_sleep_state(priv->device); /* Disable clock in case of PWM is off */ clk_disable(priv->plat->pclk); clk_disable(priv->plat->stmmac_clk); } - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); priv->oldlink = false; priv->speed = SPEED_UNKNOWN; @@ -4561,7 +4452,6 @@ int stmmac_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); - unsigned long flags; if (!netif_running(ndev)) return 0; @@ -4573,9 +4463,9 @@ int stmmac_resume(struct device *dev) * from another devices (e.g. serial console). */ if (device_may_wakeup(priv->device)) { - spin_lock_irqsave(&priv->lock, flags); - priv->hw->mac->pmt(priv->hw, 0); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_lock(&priv->lock); + stmmac_pmt(priv, priv->hw, 0); + mutex_unlock(&priv->lock); priv->irq_wake = 0; } else { pinctrl_pm_select_default_state(priv->device); @@ -4589,7 +4479,7 @@ int stmmac_resume(struct device *dev) netif_device_attach(ndev); - spin_lock_irqsave(&priv->lock, flags); + mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -4603,7 +4493,7 @@ int stmmac_resume(struct device *dev) stmmac_start_all_queues(priv); - spin_unlock_irqrestore(&priv->lock, flags); + mutex_unlock(&priv->lock); if (ndev->phydev) phy_start(ndev->phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index f5f37bfa1d58..5df1a608e566 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -233,10 +233,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->phy_mask = mdio_bus_data->phy_mask; new_bus->parent = priv->device; - if (mdio_node) - err = of_mdiobus_register(new_bus, mdio_node); - else - err = mdiobus_register(new_bus); + err = of_mdiobus_register(new_bus, mdio_node); if (err != 0) { dev_err(dev, "Cannot register the MDIO bus\n"); goto bus_register_fail; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ebd3e5ffa73c..6d141f3931eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -472,7 +472,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } if (of_device_is_compatible(np, "snps,dwmac-4.00") || - of_device_is_compatible(np, "snps,dwmac-4.10a")) { + of_device_is_compatible(np, "snps,dwmac-4.10a") || + of_device_is_compatible(np, "snps,dwmac-4.20a")) { plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index e471a903c654..0cb0e39a2be9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -49,9 +49,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) addend = neg_adj ? 
(addend - diff) : (addend + diff); spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->config_addend(priv->ptpaddr, addend); - + stmmac_config_addend(priv, priv->ptpaddr, addend); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; @@ -84,10 +82,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) nsec = reminder; spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, - priv->plat->has_gmac4); - + stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, + priv->plat->has_gmac4); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; @@ -110,9 +106,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; spin_lock_irqsave(&priv->ptp_lock, flags); - - ns = priv->hw->ptp->get_systime(priv->ptpaddr); - + stmmac_get_systime(priv, priv->ptpaddr, &ns); spin_unlock_irqrestore(&priv->ptp_lock, flags); *ts = ns_to_timespec64(ns); @@ -137,9 +131,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, unsigned long flags; spin_lock_irqsave(&priv->ptp_lock, flags); - - priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); - + stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec); spin_unlock_irqrestore(&priv->ptp_lock, flags); return 0; @@ -148,17 +140,43 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, static int stmmac_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { - return -EOPNOTSUPP; + struct stmmac_priv *priv = + container_of(ptp, struct stmmac_priv, ptp_clock_ops); + struct stmmac_pps_cfg *cfg; + int ret = -EOPNOTSUPP; + unsigned long flags; + + switch (rq->type) { + case PTP_CLK_REQ_PEROUT: + cfg = &priv->pps[rq->perout.index]; + + cfg->start.tv_sec = rq->perout.start.sec; + cfg->start.tv_nsec = rq->perout.start.nsec; + cfg->period.tv_sec = rq->perout.period.sec; + cfg->period.tv_nsec = rq->perout.period.nsec; + + spin_lock_irqsave(&priv->ptp_lock, flags); + ret = stmmac_flex_pps_config(priv, priv->ioaddr, + rq->perout.index, cfg, on, + priv->sub_second_inc, + priv->systime_flags); + spin_unlock_irqrestore(&priv->ptp_lock, flags); + break; + default: + break; + } + + return ret; } /* structure describing a PTP hardware clock */ -static const struct ptp_clock_info stmmac_ptp_clock_ops = { +static struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, .n_alarm = 0, .n_ext_ts = 0, - .n_per_out = 0, + .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */ .n_pins = 0, .pps = 0, .adjfreq = stmmac_adjust_freq, @@ -176,6 +194,16 @@ static const struct ptp_clock_info stmmac_ptp_clock_ops = { */ void stmmac_ptp_register(struct stmmac_priv *priv) { + int i; + + for (i = 0; i < priv->dma_cap.pps_out_num; i++) { + if (i >= STMMAC_PPS_MAX) + break; + priv->pps[i].available = true; + } + + stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num; + spin_lock_init(&priv->ptp_lock); priv->ptp_clock_ops = stmmac_ptp_clock_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c new file mode 100644 index 000000000000..881c94b73e2f --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates. 
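With n_per_out now reporting dma_cap.pps_out_num, the flexible PPS outputs become reachable through the standard PTP character device. A minimal userspace sketch that asks for a 1 Hz signal on output 0, assuming the controller registered as /dev/ptp0; production code would read the PHC time first and pick a start time a little in the future:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;		/* PPS output 0 (flex PPS channel) */
	req.start.sec = 0;	/* for brevity; normally PHC time + margin */
	req.start.nsec = 0;
	req.period.sec = 1;	/* 1 s period -> 1 Hz output */
	req.period.nsec = 0;

	if (ioctl(fd, PTP_PEROUT_REQUEST, &req)) {
		perror("PTP_PEROUT_REQUEST");
		return 1;
	}
	return 0;
}

The request lands in stmmac_enable() above, which copies start/period into priv->pps[] and programs the hardware via stmmac_flex_pps_config() under ptp_lock.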
+ * stmmac TC Handling (HW only) + */ + +#include <net/pkt_cls.h> +#include <net/tc_act/tc_gact.h> +#include "common.h" +#include "dwmac4.h" +#include "dwmac5.h" +#include "stmmac.h" + +static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry) +{ + memset(entry, 0, sizeof(*entry)); + entry->in_use = true; + entry->is_last = true; + entry->is_frag = false; + entry->prio = ~0x0; + entry->handle = 0; + entry->val.match_data = 0x0; + entry->val.match_en = 0x0; + entry->val.af = 1; + entry->val.dma_ch_no = 0x0; +} + +static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls, + bool free) +{ + struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL; + u32 loc = cls->knode.handle; + int i; + + for (i = 0; i < priv->tc_entries_max; i++) { + entry = &priv->tc_entries[i]; + if (!entry->in_use && !first && free) + first = entry; + if (entry->handle == loc && !free) + dup = entry; + } + + if (dup) + return dup; + if (first) { + first->handle = loc; + first->in_use = true; + + /* Reset HW values */ + memset(&first->val, 0, sizeof(first->val)); + } + + return first; +} + +static int tc_fill_actions(struct stmmac_tc_entry *entry, + struct stmmac_tc_entry *frag, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *action_entry = entry; + const struct tc_action *act; + struct tcf_exts *exts; + LIST_HEAD(actions); + + exts = cls->knode.exts; + if (!tcf_exts_has_actions(exts)) + return -EINVAL; + if (frag) + action_entry = frag; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(act, &actions, list) { + /* Accept */ + if (is_tcf_gact_ok(act)) { + action_entry->val.af = 1; + break; + } + /* Drop */ + if (is_tcf_gact_shot(act)) { + action_entry->val.rf = 1; + break; + } + + /* Unsupported */ + return -EINVAL; + } + + return 0; +} + +static int tc_fill_entry(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry, *frag = NULL; + struct tc_u32_sel *sel = cls->knode.sel; + u32 off, data, mask, real_off, rem; + u32 prio = cls->common.prio; + int ret; + + /* Only 1 match per entry */ + if (sel->nkeys <= 0 || sel->nkeys > 1) + return -EINVAL; + + off = sel->keys[0].off << sel->offshift; + data = sel->keys[0].val; + mask = sel->keys[0].mask; + + switch (ntohs(cls->common.protocol)) { + case ETH_P_ALL: + break; + case ETH_P_IP: + off += ETH_HLEN; + break; + default: + return -EINVAL; + } + + if (off > priv->tc_off_max) + return -EINVAL; + + real_off = off / 4; + rem = off % 4; + + entry = tc_find_entry(priv, cls, true); + if (!entry) + return -EINVAL; + + if (rem) { + frag = tc_find_entry(priv, cls, true); + if (!frag) { + ret = -EINVAL; + goto err_unuse; + } + + entry->frag_ptr = frag; + entry->val.match_en = (mask << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.match_data = (data << (rem * 8)) & + GENMASK(31, rem * 8); + entry->val.frame_offset = real_off; + entry->prio = prio; + + frag->val.match_en = (mask >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.match_data = (data >> (rem * 8)) & + GENMASK(rem * 8 - 1, 0); + frag->val.frame_offset = real_off + 1; + frag->prio = prio; + frag->is_frag = true; + } else { + entry->frag_ptr = NULL; + entry->val.match_en = mask; + entry->val.match_data = data; + entry->val.frame_offset = real_off; + entry->prio = prio; + } + + ret = tc_fill_actions(entry, frag, cls); + if (ret) + goto err_unuse; + + return 0; + +err_unuse: + if (frag) + frag->in_use = false; + entry->in_use = false; + return ret; +} + +static void tc_unfill_entry(struct stmmac_priv 
*priv, + struct tc_cls_u32_offload *cls) +{ + struct stmmac_tc_entry *entry; + + entry = tc_find_entry(priv, cls, false); + if (!entry) + return; + + entry->in_use = false; + if (entry->frag_ptr) { + entry = entry->frag_ptr; + entry->is_frag = false; + entry->in_use = false; + } +} + +static int tc_config_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + ret = tc_fill_entry(priv, cls); + if (ret) + return ret; + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + goto err_unfill; + + return 0; + +err_unfill: + tc_unfill_entry(priv, cls); + return ret; +} + +static int tc_delete_knode(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + int ret; + + /* Set entry and fragments as not used */ + tc_unfill_entry(priv, cls); + + ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries, + priv->tc_entries_max); + if (ret) + return ret; + + return 0; +} + +static int tc_setup_cls_u32(struct stmmac_priv *priv, + struct tc_cls_u32_offload *cls) +{ + switch (cls->command) { + case TC_CLSU32_REPLACE_KNODE: + tc_unfill_entry(priv, cls); + /* Fall through */ + case TC_CLSU32_NEW_KNODE: + return tc_config_knode(priv, cls); + case TC_CLSU32_DELETE_KNODE: + return tc_delete_knode(priv, cls); + default: + return -EOPNOTSUPP; + } +} + +static int tc_init(struct stmmac_priv *priv) +{ + struct dma_features *dma_cap = &priv->dma_cap; + unsigned int count; + + if (!dma_cap->frpsel) + return -EINVAL; + + switch (dma_cap->frpbs) { + case 0x0: + priv->tc_off_max = 64; + break; + case 0x1: + priv->tc_off_max = 128; + break; + case 0x2: + priv->tc_off_max = 256; + break; + default: + return -EINVAL; + } + + switch (dma_cap->frpes) { + case 0x0: + count = 64; + break; + case 0x1: + count = 128; + break; + case 0x2: + count = 256; + break; + default: + return -EINVAL; + } + + /* Reserve one last filter which lets all pass */ + priv->tc_entries_max = count; + priv->tc_entries = devm_kzalloc(priv->device, + sizeof(*priv->tc_entries) * count, GFP_KERNEL); + if (!priv->tc_entries) + return -ENOMEM; + + tc_fill_all_pass_entry(&priv->tc_entries[count - 1]); + + dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n", + priv->tc_entries_max, priv->tc_off_max); + return 0; +} + +const struct stmmac_tc_ops dwmac510_tc_ops = { + .init = tc_init, + .setup_cls_u32 = tc_setup_cls_u32, +}; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 48a541eb0af2..9263d638bd6d 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_TI config TI_DAVINCI_EMAC tristate "TI DaVinci EMAC Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST select TI_DAVINCI_MDIO select TI_DAVINCI_CPDMA select PHYLIB @@ -30,7 +30,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -40,7 +40,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST ---help--- This driver supports TI's DaVinci CPDMA dma engine. 
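tc_init() above sizes the Flexible RX Parser table from the frpbs/frpes capability fields and always keeps the final slot as an accept-all terminator, so traffic that matches no user rule still reaches the host. A condensed sketch of that decode, mirroring the switch statements (the 0x0/0x1/0x2 encodings come from the DMA feature register):

#include <linux/errno.h>

/* Map the FRP entry-size capability field to the number of table slots;
 * the buffer-size field (frpbs) maps the same way onto tc_off_max. */
static int frp_entry_count(unsigned int frpes)
{
	switch (frpes) {
	case 0x0: return 64;
	case 0x1: return 128;
	case 0x2: return 256;
	default:  return -EINVAL;	/* capability value not understood */
	}
}

One practical consequence: with the last slot reserved, at most count - 1 entries are available to cls_u32 rules, and unaligned matches consume two slots each, as in tc_fill_entry().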
@@ -60,7 +60,7 @@ config TI_CPSW_ALE config TI_CPSW tristate "TI CPSW Switch Support" - depends on ARCH_DAVINCI || ARCH_OMAP2PLUS + depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO select TI_CPSW_PHY_SEL @@ -75,7 +75,7 @@ config TI_CPSW config TI_CPTS bool "TI Common Platform Time Sync (CPTS) Support" - depends on TI_CPSW || TI_KEYSTONE_NETCP + depends on TI_CPSW || TI_KEYSTONE_NETCP || COMPILE_TEST depends on POSIX_TIMERS ---help--- This driver supports the Common Platform Time Sync unit of diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 18013645e76c..0c1adad7415d 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); - of_node_put(node); + if (!dev) { + dev_err(dev, "unable to find platform device for %pOF\n", node); + goto out; + } + priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); put_device(dev); +out: + of_node_put(node); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 28d893b93d30..534596ce00d3 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -29,13 +29,14 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pm_runtime.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_device.h> #include <linux/if_vlan.h> #include <linux/kmemleak.h> +#include <linux/sys_soc.h> #include <linux/pinctrl/consumer.h> @@ -957,7 +958,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget) { u32 ch_map; int num_tx, cur_budget, ch; @@ -984,7 +985,21 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) if (num_tx < budget) { napi_complete(napi_tx); writel(0xff, &cpsw->wr_regs->tx_en); - if (cpsw->quirk_irq && cpsw->tx_irq_disabled) { + } + + return num_tx; +} + +static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_tx); + int num_tx; + + num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &cpsw->wr_regs->tx_en); + if (cpsw->tx_irq_disabled) { cpsw->tx_irq_disabled = false; enable_irq(cpsw->irqs_table[1]); } @@ -993,7 +1008,7 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) return num_tx; } -static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget) { u32 ch_map; int num_rx, cur_budget, ch; @@ -1020,7 +1035,21 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) if (num_rx < budget) { napi_complete_done(napi_rx, num_rx); writel(0xff, &cpsw->wr_regs->rx_en); - if (cpsw->quirk_irq && cpsw->rx_irq_disabled) { + } + + return num_rx; +} + +static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) +{ + struct cpsw_common *cpsw = napi_to_cpsw(napi_rx); + int num_rx; + + num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget); + if (num_rx < budget) { + napi_complete_done(napi_rx, num_rx); + writel(0xff, &cpsw->wr_regs->rx_en); + if 
(cpsw->rx_irq_disabled) { cpsw->rx_irq_disabled = false; enable_irq(cpsw->irqs_table[0]); } @@ -1252,8 +1281,8 @@ static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) for (i = 0; i < ch_stats_len; i++) { line = i % CPSW_STATS_CH_LEN; snprintf(*p, ETH_GSTRING_LEN, - "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx", - i / CPSW_STATS_CH_LEN, + "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx", + (long)(i / CPSW_STATS_CH_LEN), cpsw_gstrings_ch_stats[line].stat_string); *p += ETH_GSTRING_LEN; } @@ -2364,9 +2393,9 @@ static void cpsw_get_channels(struct net_device *ndev, { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; + ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; ch->max_combined = 0; - ch->max_rx = CPSW_MAX_QUEUES; - ch->max_tx = CPSW_MAX_QUEUES; ch->max_other = 0; ch->other_count = 0; ch->rx_count = cpsw->rx_ch_num; @@ -2377,6 +2406,11 @@ static void cpsw_get_channels(struct net_device *ndev, static int cpsw_check_ch_settings(struct cpsw_common *cpsw, struct ethtool_channels *ch) { + if (cpsw->quirk_irq) { + dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); + return -EOPNOTSUPP; + } + if (ch->combined_count) return -EINVAL; @@ -2917,44 +2951,20 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) return ret; } -#define CPSW_QUIRK_IRQ BIT(0) - -static const struct platform_device_id cpsw_devtype[] = { - { - /* keep it for existing comaptibles */ - .name = "cpsw", - .driver_data = CPSW_QUIRK_IRQ, - }, { - .name = "am335x-cpsw", - .driver_data = CPSW_QUIRK_IRQ, - }, { - .name = "am4372-cpsw", - .driver_data = 0, - }, { - .name = "dra7-cpsw", - .driver_data = 0, - }, { - /* sentinel */ - } -}; -MODULE_DEVICE_TABLE(platform, cpsw_devtype); - -enum ti_cpsw_type { - CPSW = 0, - AM335X_CPSW, - AM4372_CPSW, - DRA7_CPSW, -}; - static const struct of_device_id cpsw_of_mtable[] = { - { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], }, - { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], }, - { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], }, - { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], }, + { .compatible = "ti,cpsw"}, + { .compatible = "ti,am335x-cpsw"}, + { .compatible = "ti,am4372-cpsw"}, + { .compatible = "ti,dra7-cpsw"}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, cpsw_of_mtable); +static const struct soc_device_attribute cpsw_soc_devices[] = { + { .family = "AM33xx", .revision = "ES1.0"}, + { /* sentinel */ } +}; + static int cpsw_probe(struct platform_device *pdev) { struct clk *clk; @@ -2966,9 +2976,9 @@ static int cpsw_probe(struct platform_device *pdev) void __iomem *ss_regs; void __iomem *cpts_regs; struct resource *res, *ss_res; - const struct of_device_id *of_id; struct gpio_descs *mode; u32 slave_offset, sliver_offset, slave_size; + const struct soc_device_attribute *soc; struct cpsw_common *cpsw; int ret = 0, i; int irq; @@ -3141,6 +3151,10 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dt_ret; } + soc = soc_device_match(cpsw_soc_devices); + if (soc) + cpsw->quirk_irq = 1; + cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); if (IS_ERR(cpsw->txv[0].ch)) { dev_err(priv->dev, "error initializing tx dma channel\n"); @@ -3180,19 +3194,16 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_dma_ret; } - of_id = of_match_device(cpsw_of_mtable, &pdev->dev); - if (of_id) { - pdev->id_entry = of_id->data; - if (pdev->id_entry->driver_data) - cpsw->quirk_irq = true; - } - ndev->features |= 
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &cpsw->napi_rx, + cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll, + CPSW_POLL_WEIGHT); + netif_tx_napi_add(ndev, &cpsw->napi_tx, + cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll, + CPSW_POLL_WEIGHT); cpsw_split_res(ndev); /* register the network device */ diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index e7b76f6b4f67..6f63c8729afc 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -294,7 +294,8 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp) delay = CPTS_SKB_TX_WORK_TIMEOUT; spin_unlock_irqrestore(&cpts->lock, flags); - pr_debug("cpts overflow check at %lld.%09lu\n", ts.tv_sec, ts.tv_nsec); + pr_debug("cpts overflow check at %lld.%09ld\n", + (long long)ts.tv_sec, ts.tv_nsec); return (long)delay; } @@ -564,7 +565,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, cpts->refclk = devm_clk_get(dev, "cpts"); if (IS_ERR(cpts->refclk)) { dev_err(dev, "Failed to get cpts refclk\n"); - return ERR_PTR(PTR_ERR(cpts->refclk)); + return ERR_CAST(cpts->refclk); } clk_prepare(cpts->refclk); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 31ae04117f0a..cdbddf16dd29 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -191,7 +191,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr) return; WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool), - "cpdma_desc_pool size %d != avail %d", + "cpdma_desc_pool size %zd != avail %zd", gen_pool_size(pool->gen_pool), gen_pool_avail(pool->gen_pool)); if (pool->cpumap) @@ -1080,7 +1080,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, writel_relaxed(buffer, &desc->hw_buffer); writel_relaxed(len, &desc->hw_len); writel_relaxed(mode | len, &desc->hw_mode); - writel_relaxed(token, &desc->sw_token); + writel_relaxed((uintptr_t)token, &desc->sw_token); writel_relaxed(buffer, &desc->sw_buffer); writel_relaxed(len, &desc->sw_len); desc_read(desc, sw_len); @@ -1121,15 +1121,15 @@ static void __cpdma_chan_free(struct cpdma_chan *chan, struct cpdma_desc_pool *pool = ctlr->pool; dma_addr_t buff_dma; int origlen; - void *token; + uintptr_t token; - token = (void *)desc_read(desc, sw_token); + token = desc_read(desc, sw_token); buff_dma = desc_read(desc, sw_buffer); origlen = desc_read(desc, sw_len); dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); cpdma_desc_free(pool, desc, 1); - (*chan->handler)(token, outlen, status); + (*chan->handler)((void *)token, outlen, status); } static int __cpdma_chan_process(struct cpdma_chan *chan) diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 38828ab77eb9..06d7c9e4dcda 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1930,8 +1930,8 @@ static int davinci_emac_probe(struct platform_device *pdev) if (netif_msg_probe(priv)) { dev_notice(&pdev->dev, "DaVinci EMAC Probe found device " - "(regs: %p, irq: %d)\n", - (void *)priv->emac_base_phys, ndev->irq); + "(regs: %pa, irq: %d)\n", + &priv->emac_base_phys, ndev->irq); } pm_runtime_put(&pdev->dev); diff --git 
a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 3c33f4504d8e..a98aedae1b41 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -34,6 +34,7 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/pm_runtime.h> #include <linux/davinci_emac.h> #include <linux/of.h> @@ -227,14 +228,14 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) static inline int wait_for_idle(struct davinci_mdio_data *data) { struct davinci_mdio_regs __iomem *regs = data->regs; - unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); + u32 val, ret; - while (time_after(timeout, jiffies)) { - if (__raw_readl(®s->control) & CONTROL_IDLE) - return 0; - } - dev_err(data->dev, "timed out waiting for idle\n"); - return -ETIMEDOUT; + ret = readl_poll_timeout(®s->control, val, val & CONTROL_IDLE, + 0, MDIO_TIMEOUT * 1000); + if (ret) + dev_err(data->dev, "timed out waiting for idle\n"); + + return ret; } static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) @@ -320,7 +321,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return ret; } -#if IS_ENABLED(CONFIG_OF) static int davinci_mdio_probe_dt(struct mdio_platform_data *data, struct platform_device *pdev) { @@ -338,7 +338,6 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, return 0; } -#endif #if IS_ENABLED(CONFIG_OF) static const struct davinci_mdio_of_param of_cpsw_mdio_data = { @@ -373,7 +372,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - if (dev->of_node) { + if (IS_ENABLED(CONFIG_OF) && dev->of_node) { const struct of_device_id *of_id; ret = davinci_mdio_probe_dt(&data->pdata, pdev); @@ -428,12 +427,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) * defined to support backward compatibility with DTs which assume that * Davinci MDIO will always scan the bus for PHYs detection. 
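The wait_for_idle() rewrite above swaps an open-coded jiffies loop for readl_poll_timeout() from <linux/iopoll.h>, which bounds the wait and performs a final re-check of the condition on timeout. The same helper suits any "spin until a status bit sets" wait; a minimal sketch with an illustrative register and bit:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

/* Poll a status register until the (hypothetical) READY bit sets,
 * tight-looping between reads (sleep_us = 0) and giving up after
 * 100 ms. Use readl_poll_timeout_atomic() in atomic context instead. */
static int wait_until_ready(void __iomem *status_reg)
{
	u32 val;

	return readl_poll_timeout(status_reg, val, val & BIT(0),
				  0, 100000);
}

Note the converted code multiplies MDIO_TIMEOUT (milliseconds) by 1000 because readl_poll_timeout() takes its timeout in microseconds.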
*/ - if (dev->of_node && of_get_child_count(dev->of_node)) { + if (dev->of_node && of_get_child_count(dev->of_node)) data->skip_scan = true; - ret = of_mdiobus_register(data->bus, dev->of_node); - } else { - ret = mdiobus_register(data->bus); - } + + ret = of_mdiobus_register(data->bus, dev->of_node); if (ret) goto bail_out; diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 8900a6fad318..c4ffdf47bad5 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -33,6 +33,8 @@ #define SGMII_LINK_MAC_MAC_FORCED 2 #define SGMII_LINK_MAC_FIBER 3 #define SGMII_LINK_MAC_PHY_NO_MDIO 4 +#define RGMII_LINK_MAC_PHY 5 +#define RGMII_LINK_MAC_PHY_NO_MDIO 7 #define XGMII_LINK_MAC_PHY 10 #define XGMII_LINK_MAC_MAC_FORCED 11 @@ -212,6 +214,7 @@ struct netcp_module { int (*add_vid)(void *intf_priv, int vid); int (*del_vid)(void *intf_priv, int vid); int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd); + int (*set_rx_mode)(void *intf_priv, bool promisc); /* used internally */ struct list_head module_list; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index f5a7eb22d0f5..e40aa3e31af2 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1509,6 +1509,24 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) } } +static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc) +{ + struct netcp_intf_modpriv *priv; + struct netcp_module *module; + int error; + + for_each_module(netcp, priv) { + module = priv->netcp_module; + if (!module->set_rx_mode) + continue; + + error = module->set_rx_mode(priv->module_priv, promisc); + if (error) + return error; + } + return 0; +} + static void netcp_set_rx_mode(struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); @@ -1538,6 +1556,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) /* finally sweep and callout into modules */ netcp_addr_sweep_del(netcp); netcp_addr_sweep_add(netcp); + netcp_set_promiscuous(netcp, promisc); spin_unlock(&netcp->lock); } @@ -2155,8 +2174,13 @@ static int netcp_probe(struct platform_device *pdev) struct device_node *child, *interfaces; struct netcp_device *netcp_device; struct device *dev = &pdev->dev; + struct netcp_module *module; int ret; + if (!knav_dma_device_ready() || + !knav_qmss_device_ready()) + return -EPROBE_DEFER; + if (!node) { dev_err(dev, "could not find device info\n"); return -ENODEV; @@ -2203,6 +2227,14 @@ static int netcp_probe(struct platform_device *pdev) /* Add the device instance to the list */ list_add_tail(&netcp_device->device_list, &netcp_devices); + /* Probe & attach any modules already registered */ + mutex_lock(&netcp_modules_lock); + for_each_netcp_module(module) { + ret = netcp_module_probe(netcp_device, module); + if (ret < 0) + dev_err(dev, "module(%s) probe failed\n", module->name); + } + mutex_unlock(&netcp_modules_lock); return 0; probe_quit_interface: diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 56dbc0b9fedc..6e455a27a8de 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -21,6 +21,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/of_mdio.h> +#include <linux/of_net.h> #include <linux/of_address.h> #include <linux/if_vlan.h> #include <linux/ptp_classify.h> @@ -42,7 +43,7 @@ /* 1G Ethernet SS defines */ #define GBE_MODULE_NAME "netcp-gbe" -#define GBE_SS_VERSION_14 0x4ed21104 +#define 
GBE_SS_VERSION_14 0x4ed2 #define GBE_SS_REG_INDEX 0 #define GBE_SGMII34_REG_INDEX 1 @@ -72,6 +73,11 @@ #define IS_SS_ID_NU(d) \ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) +#define IS_SS_ID_VER_14(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14) +#define IS_SS_ID_2U(d) \ + (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U) + #define GBENU_SS_REG_INDEX 0 #define GBENU_SM_REG_INDEX 1 #define GBENU_SGMII_MODULE_OFFSET 0x100 @@ -86,7 +92,7 @@ /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" -#define XGBE_SS_VERSION_10 0x4ee42100 +#define XGBE_SS_VERSION_10 0x4ee4 #define XGBE_SS_REG_INDEX 0 #define XGBE_SM_REG_INDEX 1 @@ -166,6 +172,11 @@ #define GBE_RXHOOK_ORDER 0 #define GBE_DEFAULT_ALE_AGEOUT 30 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY) +#define SLAVE_LINK_IS_RGMII(s) \ + (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \ + ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO)) +#define SLAVE_LINK_IS_SGMII(s) \ + ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO) #define NETCP_LINK_STATE_INVALID -1 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \ @@ -549,6 +560,7 @@ struct gbe_ss_regs { struct gbe_ss_regs_ofs { u16 id_ver; u16 control; + u16 rgmii_status; /* 2U */ }; struct gbe_switch_regs { @@ -591,6 +603,7 @@ struct gbe_port_regs { struct gbe_port_regs_ofs { u16 port_vlan; u16 tx_pri_map; + u16 rx_pri_map; u16 sa_lo; u16 sa_hi; u16 ts_ctl; @@ -695,6 +708,7 @@ struct gbe_slave { u32 link_interface; u32 mac_control; u8 phy_port_t; + struct device_node *node; struct device_node *phy_node; struct ts_ctl ts_ctl; struct list_head slave_list; @@ -1915,7 +1929,7 @@ static void keystone_get_ethtool_stats(struct net_device *ndev, gbe_dev = gbe_intf->gbe_dev; spin_lock_bh(&gbe_dev->hw_stats_lock); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_update_stats_ver14(gbe_dev, data); else gbe_update_stats(gbe_dev, data); @@ -2091,8 +2105,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE_FORWARD); if (ndev && slave->open && - slave->link_interface != SGMII_LINK_MAC_PHY && - slave->link_interface != XGMII_LINK_MAC_PHY) + ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && + (slave->link_interface != XGMII_LINK_MAC_PHY))) netif_carrier_on(ndev); } else { writel(mac_control, GBE_REG_ADDR(slave, emac_regs, @@ -2101,8 +2116,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); if (ndev && - slave->link_interface != SGMII_LINK_MAC_PHY && - slave->link_interface != XGMII_LINK_MAC_PHY) + ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && + (slave->link_interface != XGMII_LINK_MAC_PHY))) netif_carrier_off(ndev); } @@ -2115,23 +2131,35 @@ static bool gbe_phy_link_status(struct gbe_slave *slave) return !slave->phy || slave->phy->link; } +#define RGMII_REG_STATUS_LINK BIT(0) + +static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status) +{ + u32 val = 0; + + val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status)); + *status = !!(val & RGMII_REG_STATUS_LINK); +} + static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, struct gbe_slave *slave, struct net_device *ndev) { - int sp = slave->slave_num; - int phy_link_state, sgmii_link_state = 1, link_state; + bool sw_link_state = true, phy_link_state; + int sp = slave->slave_num, link_state; if (!slave->open) return; - if (!SLAVE_LINK_IS_XGMII(slave)) 
{ - sgmii_link_state = - netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); - } + if (SLAVE_LINK_IS_RGMII(slave)) + netcp_2u_rgmii_get_port_link(gbe_dev, + &sw_link_state); + if (SLAVE_LINK_IS_SGMII(slave)) + sw_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); phy_link_state = gbe_phy_link_status(slave); - link_state = phy_link_state & sgmii_link_state; + link_state = phy_link_state & sw_link_state; if (atomic_xchg(&slave->link_state, link_state) != link_state) netcp_ethss_link_state_action(gbe_dev, ndev, slave, @@ -2205,7 +2233,7 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, max_rx_len = NETCP_MAX_FRAME_SIZE; /* Enable correct MII mode at SS level */ - if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) && + if (IS_SS_ID_XGBE(gbe_dev) && (slave->link_interface >= XGMII_LINK_MAC_PHY)) { xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control)); xgmii_mode |= (1 << slave->slave_num); @@ -2236,7 +2264,8 @@ static void gbe_slave_stop(struct gbe_intf *intf) struct gbe_priv *gbe_dev = intf->gbe_dev; struct gbe_slave *slave = intf->slave; - gbe_sgmii_rtreset(gbe_dev, slave, true); + if (!IS_SS_ID_2U(gbe_dev)) + gbe_sgmii_rtreset(gbe_dev, slave, true); gbe_port_reset(slave); /* Disable forwarding */ cpsw_ale_control_set(gbe_dev->ale, slave->port_num, @@ -2271,11 +2300,20 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) void (*hndlr)(struct net_device *) = gbe_adjust_link; - gbe_sgmii_config(priv, slave); + if (!IS_SS_ID_2U(priv)) + gbe_sgmii_config(priv, slave); gbe_port_reset(slave); - gbe_sgmii_rtreset(priv, slave, false); + if (!IS_SS_ID_2U(priv)) + gbe_sgmii_rtreset(priv, slave, false); gbe_port_config(priv, slave, priv->rx_packet_max); gbe_set_slave_mac(slave, gbe_intf); + /* For NU & 2U switch, map the vlan priorities to zero + * as we only configure to use priority 0 + */ + if (IS_SS_ID_MU(priv)) + writel(HOST_TX_PRI_MAP_DEFAULT, + GBE_REG_ADDR(slave, port_regs, rx_pri_map)); + /* enable forwarding */ cpsw_ale_control_set(priv->ale, slave->port_num, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); @@ -2286,6 +2324,21 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) has_phy = true; phy_mode = PHY_INTERFACE_MODE_SGMII; slave->phy_port_t = PORT_MII; + } else if (slave->link_interface == RGMII_LINK_MAC_PHY) { + has_phy = true; + phy_mode = of_get_phy_mode(slave->node); + /* if phy-mode is not present, default to + * PHY_INTERFACE_MODE_RGMII + */ + if (phy_mode < 0) + phy_mode = PHY_INTERFACE_MODE_RGMII; + + if (!phy_interface_mode_is_rgmii(phy_mode)) { + dev_err(priv->dev, + "Unsupported phy mode %d\n", phy_mode); + return -EINVAL; + } + slave->phy_port_t = PORT_MII; } else if (slave->link_interface == XGMII_LINK_MAC_PHY) { has_phy = true; phy_mode = PHY_INTERFACE_MODE_NA; @@ -2293,7 +2346,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) } if (has_phy) { - if (priv->ss_version == XGBE_SS_VERSION_10) + if (IS_SS_ID_XGBE(priv)) hndlr = xgbe_adjust_link; slave->phy = of_phy_connect(gbe_intf->ndev, @@ -2722,6 +2775,61 @@ static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req) } #endif /* CONFIG_TI_CPTS */ +static int gbe_set_rx_mode(void *intf_priv, bool promisc) +{ + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; + struct cpsw_ale *ale = gbe_dev->ale; + unsigned long timeout; + int i, ret = -ETIMEDOUT; + + /* Disable(1)/Enable(0) Learn for all ports (host is port 0 and + * slaves are port 1 and up + */ + for (i = 0; i <= gbe_dev->num_slaves; i++) { + 
cpsw_ale_control_set(ale, i, + ALE_PORT_NOLEARN, !!promisc); + cpsw_ale_control_set(ale, i, + ALE_PORT_NO_SA_UPDATE, !!promisc); + } + + if (!promisc) { + /* Don't Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); + dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n"); + return 0; + } + + timeout = jiffies + HZ; + + /* Clear All Untouched entries */ + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + do { + cpu_relax(); + if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) { + ret = 0; + break; + } + + } while (time_after(timeout, jiffies)); + + /* Make sure it is not a false timeout */ + if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) + return ret; + + cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); + + /* Clear all mcast from ALE */ + cpsw_ale_flush_multicast(ale, + GBE_PORT_MASK(gbe_dev->ale_ports), + -1); + + /* Flood All Unicast Packets to Host port */ + cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); + dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n"); + return ret; +} + static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) { struct gbe_intf *gbe_intf = intf_priv; @@ -2764,7 +2872,7 @@ static void netcp_ethss_timer(struct timer_list *t) /* A timer runs as a BH, no need to block them */ spin_lock(&gbe_dev->hw_stats_lock); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_update_stats_ver14(gbe_dev, NULL); else gbe_update_stats(gbe_dev, NULL); @@ -2807,7 +2915,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev) GBE_RTL_VERSION(reg), GBE_IDENT(reg)); /* For 10G and on NetCP 1.5, use directed to port */ - if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev)) + if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev)) gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO; if (gbe_dev->enable_ale) @@ -2911,8 +3019,10 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, slave->link_interface = SGMII_LINK_MAC_PHY; } + slave->node = node; slave->open = false; if ((slave->link_interface == SGMII_LINK_MAC_PHY) || + (slave->link_interface == RGMII_LINK_MAC_PHY) || (slave->link_interface == XGMII_LINK_MAC_PHY)) slave->phy_node = of_parse_phandle(node, "phy-handle", 0); slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num); @@ -2924,7 +3034,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, /* Emac regs memmap are contiguous but port regs are not */ port_reg_num = slave->slave_num; - if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + if (IS_SS_ID_VER_14(gbe_dev)) { if (slave->slave_num > 1) { port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET; port_reg_num -= 2; @@ -2939,7 +3049,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, emac_reg_ofs = GBENU_EMAC_OFFSET; port_reg_blk_sz = 0x1000; emac_reg_blk_sz = 0x1000; - } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + } else if (IS_SS_ID_XGBE(gbe_dev)) { port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET; emac_reg_ofs = XGBE10_EMAC_OFFSET; port_reg_blk_sz = 0x30; @@ -2955,7 +3065,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs + (emac_reg_blk_sz * slave->slave_num); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) { + if (IS_SS_ID_VER_14(gbe_dev)) { /* Initialize slave port register offsets */ GBE_SET_REG_OFS(slave, port_regs, port_vlan); GBE_SET_REG_OFS(slave, port_regs, tx_pri_map); @@ -2976,6 +3086,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, /* 
Initialize slave port register offsets */ GBENU_SET_REG_OFS(slave, port_regs, port_vlan); GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map); + GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map); GBENU_SET_REG_OFS(slave, port_regs, sa_lo); GBENU_SET_REG_OFS(slave, port_regs, sa_hi); GBENU_SET_REG_OFS(slave, port_regs, ts_ctl); @@ -2989,7 +3100,7 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave, GBENU_SET_REG_OFS(slave, emac_regs, mac_control); GBENU_SET_REG_OFS(slave, emac_regs, soft_reset); - } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) { + } else if (IS_SS_ID_XGBE(gbe_dev)) { /* Initialize slave port register offsets */ XGBE_SET_REG_OFS(slave, port_regs, port_vlan); XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map); @@ -3039,7 +3150,8 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, continue; } - gbe_sgmii_config(gbe_dev, slave); + if (!IS_SS_ID_2U(gbe_dev)) + gbe_sgmii_config(gbe_dev, slave); gbe_port_reset(slave); gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max); list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves); @@ -3073,6 +3185,9 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, if (slave->link_interface == SGMII_LINK_MAC_PHY) { phy_mode = PHY_INTERFACE_MODE_SGMII; slave->phy_port_t = PORT_MII; + } else if (slave->link_interface == RGMII_LINK_MAC_PHY) { + phy_mode = PHY_INTERFACE_MODE_RGMII; + slave->phy_port_t = PORT_MII; } else { phy_mode = PHY_INTERFACE_MODE_NA; slave->phy_port_t = PORT_FIBRE; @@ -3080,6 +3195,7 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_sec_slave(slave, gbe_dev) { if ((slave->link_interface != SGMII_LINK_MAC_PHY) && + (slave->link_interface != RGMII_LINK_MAC_PHY) && (slave->link_interface != XGMII_LINK_MAC_PHY)) continue; slave->phy = @@ -3090,7 +3206,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, if (!slave->phy) { dev_err(dev, "phy not found for slave %d\n", slave->slave_num); - slave->phy = NULL; } else { dev_dbg(dev, "phy found: id is: 0x%s\n", phydev_name(slave->phy)); @@ -3355,7 +3470,7 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->num_stats_mods = gbe_dev->max_num_ports; gbe_dev->et_stats = gbenu_et_stats; - if (IS_SS_ID_NU(gbe_dev)) + if (IS_SS_ID_MU(gbe_dev)) gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); else @@ -3396,7 +3511,9 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, } gbe_dev->switch_regs = regs; - gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + if (!IS_SS_ID_2U(gbe_dev)) + gbe_dev->sgmii_port_regs = + gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; /* Although sgmii modules are mem mapped to one contiguous * region on GBENU devices, setting sgmii_port34_regs allows @@ -3419,6 +3536,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, /* Subsystem registers */ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); + /* ok to set for MU, but used by 2U only */ + GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status); /* Switch module registers */ GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver); @@ -3464,6 +3583,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, gbe_dev->max_num_slaves = 8; } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) { gbe_dev->max_num_slaves = 1; + gbe_module.set_rx_mode = gbe_set_rx_mode; } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) { gbe_dev->max_num_slaves = 2; } else { @@ -3508,7 +3628,7 @@ static int gbe_probe(struct netcp_device 
*netcp_device, struct device *dev, dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) ret = set_gbe_ethss14_priv(gbe_dev, node); else if (IS_SS_ID_MU(gbe_dev)) ret = set_gbenu_ethss_priv(gbe_dev, node); @@ -3606,7 +3726,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, spin_lock_bh(&gbe_dev->hw_stats_lock); for (i = 0; i < gbe_dev->num_stats_mods; i++) { - if (gbe_dev->ss_version == GBE_SS_VERSION_14) + if (IS_SS_ID_VER_14(gbe_dev)) gbe_reset_mod_stats_ver14(gbe_dev, i); else gbe_reset_mod_stats(gbe_dev, i); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index e74e1e897864..f24f48f33802 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -900,7 +900,6 @@ static void axienet_dma_err_handler(unsigned long data); * @ndev: Pointer to net_device structure * * Return: 0, on success. - * -ENODEV, if PHY cannot be connected to * non-zero error value on failure * * This is the driver open routine. It calls phy_start to start the PHY device. diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index b919e89a9b93..750eaa53bf0c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -36,6 +36,8 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); #define GENEVE_VER 0 #define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr)) +#define GENEVE_IPV4_HLEN (ETH_HLEN + sizeof(struct iphdr) + GENEVE_BASE_HLEN) +#define GENEVE_IPV6_HLEN (ETH_HLEN + sizeof(struct ipv6hdr) + GENEVE_BASE_HLEN) /* per-network namespace private data for this module */ struct geneve_net { @@ -826,8 +828,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); if (skb_dst(skb)) { - int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) - - GENEVE_BASE_HLEN - info->options_len - 14; + int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN - + info->options_len; skb_dst_update_pmtu(skb, mtu); } @@ -872,8 +874,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(dst); if (skb_dst(skb)) { - int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - - GENEVE_BASE_HLEN - info->options_len - 14; + int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len; skb_dst_update_pmtu(skb, mtu); } @@ -941,11 +942,10 @@ tx_error: static int geneve_change_mtu(struct net_device *dev, int new_mtu) { - /* Only possible if called internally, ndo_change_mtu path's new_mtu - * is guaranteed to be between dev->min_mtu and dev->max_mtu. 
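 * [Aside, not part of the patch] The assumption in the comment being
 * deleted here no longer holds: geneve_change_mtu() gains an internal
 * caller, geneve_link_config() (added further down), which passes a
 * computed value, so the function must now clamp to the range
 * [dev->min_mtu, dev->max_mtu] itself instead of trusting the
 * ndo_change_mtu path to have validated new_mtu.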
- */ if (new_mtu > dev->max_mtu) new_mtu = dev->max_mtu; + else if (new_mtu < dev->min_mtu) + new_mtu = dev->min_mtu; dev->mtu = new_mtu; return 0; @@ -1261,7 +1261,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], } if (data[IFLA_GENEVE_REMOTE6]) { - #if IS_ENABLED(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IPV6) if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { attrtype = IFLA_GENEVE_REMOTE6; goto change_notsup; @@ -1387,6 +1387,48 @@ change_notsup: return -EOPNOTSUPP; } +static void geneve_link_config(struct net_device *dev, + struct ip_tunnel_info *info, struct nlattr *tb[]) +{ + struct geneve_dev *geneve = netdev_priv(dev); + int ldev_mtu = 0; + + if (tb[IFLA_MTU]) { + geneve_change_mtu(dev, nla_get_u32(tb[IFLA_MTU])); + return; + } + + switch (ip_tunnel_info_af(info)) { + case AF_INET: { + struct flowi4 fl4 = { .daddr = info->key.u.ipv4.dst }; + struct rtable *rt = ip_route_output_key(geneve->net, &fl4); + + if (!IS_ERR(rt) && rt->dst.dev) { + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV4_HLEN; + ip_rt_put(rt); + } + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: { + struct rt6_info *rt = rt6_lookup(geneve->net, + &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); + + if (rt && rt->dst.dev) + ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; + ip6_rt_put(rt); + break; + } +#endif + } + + if (ldev_mtu <= 0) + return; + + geneve_change_mtu(dev, ldev_mtu - info->options_len); +} + static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -1402,8 +1444,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (err) return err; - return geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums); + err = geneve_configure(net, dev, extack, &info, metadata, + use_udp6_rx_checksums); + if (err) + return err; + + geneve_link_config(dev, &info, tb); + + return 0; } /* Quiesces the geneve device data path for both TX and RX. 
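 * [Aside, not part of the patch] Worked numbers for the GENEVE_*_HLEN
 * constants introduced above, assuming the usual header sizes
 * (ETH_HLEN = 14, struct iphdr = 20, struct ipv6hdr = 40,
 * struct udphdr = 8, struct genevehdr = 8):
 *
 *   GENEVE_BASE_HLEN = 8 + 8        = 16
 *   GENEVE_IPV4_HLEN = 14 + 20 + 16 = 50
 *   GENEVE_IPV6_HLEN = 14 + 40 + 16 = 70
 *
 * so geneve_link_config() derives a tunnel MTU of 1450 (IPv4) or 1430
 * (IPv6) from a 1500-byte lower device before subtracting the option
 * length, and the "- 14" dropped from the xmit paths was an open-coded
 * ETH_HLEN.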
@@ -1477,8 +1525,10 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; - if (!geneve_dst_addr_equal(&geneve->info, &info)) + if (!geneve_dst_addr_equal(&geneve->info, &info)) { dst_cache_reset(&info.dst_cache); + geneve_link_config(dev, &info, tb); + } geneve_quiesce(geneve, &gs4, &gs6); geneve->info = info; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index c180b480f8ef..13e4c1eff353 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -217,7 +217,7 @@ static int kiss_esc_crc(unsigned char *s, unsigned char *d, unsigned short crc, c = *s++; else if (len > 1) c = crc >> 8; - else if (len > 0) + else c = crc & 0xff; len--; diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 1ab97d99b9ba..f41116488079 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c @@ -867,7 +867,7 @@ static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx) dev->name); goto drop; case E_FRM_ERR: - printk(KERN_WARNING "%s: Framming Error\n", + printk(KERN_WARNING "%s: Framing Error\n", dev->name); goto drop; case E_FLG_SYN_ERR: diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig index 936968d23559..23a2d145813a 100644 --- a/drivers/net/hyperv/Kconfig +++ b/drivers/net/hyperv/Kconfig @@ -1,5 +1,7 @@ config HYPERV_NET tristate "Microsoft Hyper-V virtual network driver" depends on HYPERV + select UCS2_STRING + select FAILOVER help Select this option to enable the Hyper-V virtual network driver. diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 960f06141472..99d8e7398a5b 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -110,7 +110,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */ u16 hashkey_size; /* The offset of the secret key from the beginning of this structure */ - u32 kashkey_offset; + u32 hashkey_offset; u32 processor_masks_offset; u32 num_processor_masks; @@ -237,6 +237,8 @@ void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); #define NVSP_PROTOCOL_VERSION_2 0x30002 #define NVSP_PROTOCOL_VERSION_4 0x40000 #define NVSP_PROTOCOL_VERSION_5 0x50000 +#define NVSP_PROTOCOL_VERSION_6 0x60000 +#define NVSP_PROTOCOL_VERSION_61 0x60001 enum { NVSP_MSG_TYPE_NONE = 0, @@ -308,6 +310,12 @@ enum { NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE, + + /* Version 6 messages */ + NVSP_MSG6_TYPE_PD_API, + NVSP_MSG6_TYPE_PD_POST_BATCH, + + NVSP_MSG6_MAX = NVSP_MSG6_TYPE_PD_POST_BATCH }; enum { @@ -619,12 +627,168 @@ union nvsp_5_message_uber { struct nvsp_5_send_indirect_table send_table; } __packed; +enum nvsp_6_pd_api_op { + PD_API_OP_CONFIG = 1, + PD_API_OP_SW_DATAPATH, /* Switch Datapath */ + PD_API_OP_OPEN_PROVIDER, + PD_API_OP_CLOSE_PROVIDER, + PD_API_OP_CREATE_QUEUE, + PD_API_OP_FLUSH_QUEUE, + PD_API_OP_FREE_QUEUE, + PD_API_OP_ALLOC_COM_BUF, /* Allocate Common Buffer */ + PD_API_OP_FREE_COM_BUF, /* Free Common Buffer */ + PD_API_OP_MAX +}; + +struct grp_affinity { + u64 mask; + u16 grp; + u16 reserved[3]; +} __packed; + +struct nvsp_6_pd_api_req { + u32 op; + + union { + /* MMIO information is sent from the VM to VSP */ + struct __packed { + u64 mmio_pa; /* MMIO Physical Address */ + u32 mmio_len; + + /* Number of PD queues a VM can support */ + u16 num_subchn; + } config; + + /* Switch Datapath */ + struct __packed { + /* Host Datapath Is PacketDirect */ + u8 host_dpath_is_pd; + + /* Guest 
PacketDirect Is Enabled */ + u8 guest_pd_enabled; + } sw_dpath; + + /* Open Provider*/ + struct __packed { + u32 prov_id; /* Provider id */ + u32 flag; + } open_prov; + + /* Close Provider */ + struct __packed { + u32 prov_id; + } cls_prov; + + /* Create Queue*/ + struct __packed { + u32 prov_id; + u16 q_id; + u16 q_size; + u8 is_recv_q; + u8 is_rss_q; + u32 recv_data_len; + struct grp_affinity affy; + } cr_q; + + /* Delete Queue*/ + struct __packed { + u32 prov_id; + u16 q_id; + } del_q; + + /* Flush Queue */ + struct __packed { + u32 prov_id; + u16 q_id; + } flush_q; + + /* Allocate Common Buffer */ + struct __packed { + u32 len; + u32 pf_node; /* Preferred Node */ + u16 region_id; + } alloc_com_buf; + + /* Free Common Buffer */ + struct __packed { + u32 len; + u64 pa; /* Physical Address */ + u32 pf_node; /* Preferred Node */ + u16 region_id; + u8 cache_type; + } free_com_buf; + } __packed; +} __packed; + +struct nvsp_6_pd_api_comp { + u32 op; + u32 status; + + union { + struct __packed { + /* actual number of PD queues allocated to the VM */ + u16 num_pd_q; + + /* Num Receive Rss PD Queues */ + u8 num_rss_q; + + u8 is_supported; /* Is supported by VSP */ + u8 is_enabled; /* Is enabled by VSP */ + } config; + + /* Open Provider */ + struct __packed { + u32 prov_id; + } open_prov; + + /* Create Queue */ + struct __packed { + u32 prov_id; + u16 q_id; + u16 q_size; + u32 recv_data_len; + struct grp_affinity affy; + } cr_q; + + /* Allocate Common Buffer */ + struct __packed { + u64 pa; /* Physical Address */ + u32 len; + u32 pf_node; /* Preferred Node */ + u16 region_id; + u8 cache_type; + } alloc_com_buf; + } __packed; +} __packed; + +struct nvsp_6_pd_buf { + u32 region_offset; + u16 region_id; + u16 is_partial:1; + u16 reserved:15; +} __packed; + +struct nvsp_6_pd_batch_msg { + struct nvsp_message_header hdr; + u16 count; + u16 guest2host:1; + u16 is_recv:1; + u16 reserved:14; + struct nvsp_6_pd_buf pd_buf[0]; +} __packed; + +union nvsp_6_message_uber { + struct nvsp_6_pd_api_req pd_req; + struct nvsp_6_pd_api_comp pd_comp; +} __packed; + union nvsp_all_messages { union nvsp_message_init_uber init_msg; union nvsp_1_message_uber v1_msg; union nvsp_2_message_uber v2_msg; union nvsp_4_message_uber v4_msg; union nvsp_5_message_uber v5_msg; + union nvsp_6_message_uber v6_msg; } __packed; /* ALL Messages */ @@ -768,6 +932,8 @@ struct net_device_context { u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; + + struct failover *failover; }; /* Per channel data */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 04f611e6f678..d2ee66c259a7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -525,7 +525,8 @@ static int netvsc_connect_vsp(struct hv_device *device, struct net_device *ndev = hv_get_drvdata(device); static const u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, - NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 + NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5, + NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61 }; struct nvsp_message *init_packet; int ndis_version, i, ret; @@ -651,16 +652,14 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device, sync_change_bit(index, net_device->send_section_map); } -static void netvsc_send_tx_complete(struct netvsc_device *net_device, - struct vmbus_channel *incoming_channel, - struct hv_device *device, +static void netvsc_send_tx_complete(struct net_device *ndev, + struct netvsc_device *net_device, + struct vmbus_channel *channel, 
const struct vmpacket_descriptor *desc, int budget) { struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id; - struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *ndev_ctx = netdev_priv(ndev); - struct vmbus_channel *channel = device->channel; u16 q_idx = 0; int queue_sends; @@ -674,7 +673,6 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, if (send_index != NETVSC_INVALID_INDEX) netvsc_free_send_slot(net_device, send_index); q_idx = packet->q_idx; - channel = incoming_channel; tx_stats = &net_device->chan_table[q_idx].tx_stats; @@ -704,14 +702,13 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device, } } -static void netvsc_send_completion(struct netvsc_device *net_device, +static void netvsc_send_completion(struct net_device *ndev, + struct netvsc_device *net_device, struct vmbus_channel *incoming_channel, - struct hv_device *device, const struct vmpacket_descriptor *desc, int budget) { - struct nvsp_message *nvsp_packet = hv_pkt_data(desc); - struct net_device *ndev = hv_get_drvdata(device); + const struct nvsp_message *nvsp_packet = hv_pkt_data(desc); switch (nvsp_packet->hdr.msg_type) { case NVSP_MSG_TYPE_INIT_COMPLETE: @@ -725,8 +722,8 @@ static void netvsc_send_completion(struct netvsc_device *net_device, break; case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE: - netvsc_send_tx_complete(net_device, incoming_channel, - device, desc, budget); + netvsc_send_tx_complete(ndev, net_device, incoming_channel, + desc, budget); break; default: @@ -1091,12 +1088,11 @@ static void enq_receive_complete(struct net_device *ndev, static int netvsc_receive(struct net_device *ndev, struct netvsc_device *net_device, - struct net_device_context *net_device_ctx, - struct hv_device *device, struct vmbus_channel *channel, const struct vmpacket_descriptor *desc, - struct nvsp_message *nvsp) + const struct nvsp_message *nvsp) { + struct net_device_context *net_device_ctx = netdev_priv(ndev); const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct vmtransfer_page_packet_header, d); u16 q_idx = channel->offermsg.offer.sub_channel_index; @@ -1157,13 +1153,12 @@ static int netvsc_receive(struct net_device *ndev, return count; } -static void netvsc_send_table(struct hv_device *hdev, - struct nvsp_message *nvmsg) +static void netvsc_send_table(struct net_device *ndev, + const struct nvsp_message *nvmsg) { - struct net_device *ndev = hv_get_drvdata(hdev); struct net_device_context *net_device_ctx = netdev_priv(ndev); - int i; u32 count, *tab; + int i; count = nvmsg->msg.v5_msg.send_table.count; if (count != VRSS_SEND_TAB_SIZE) { @@ -1178,24 +1173,25 @@ static void netvsc_send_table(struct hv_device *hdev, net_device_ctx->tx_table[i] = tab[i]; } -static void netvsc_send_vf(struct net_device_context *net_device_ctx, - struct nvsp_message *nvmsg) +static void netvsc_send_vf(struct net_device *ndev, + const struct nvsp_message *nvmsg) { + struct net_device_context *net_device_ctx = netdev_priv(ndev); + net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; } -static inline void netvsc_receive_inband(struct hv_device *hdev, - struct net_device_context *net_device_ctx, - struct nvsp_message *nvmsg) +static void netvsc_receive_inband(struct net_device *ndev, + const struct nvsp_message *nvmsg) { switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: - netvsc_send_table(hdev, nvmsg); + netvsc_send_table(ndev, 
nvmsg); break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: - netvsc_send_vf(net_device_ctx, nvmsg); + netvsc_send_vf(ndev, nvmsg); break; } } @@ -1207,24 +1203,23 @@ static int netvsc_process_raw_pkt(struct hv_device *device, const struct vmpacket_descriptor *desc, int budget) { - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct nvsp_message *nvmsg = hv_pkt_data(desc); + const struct nvsp_message *nvmsg = hv_pkt_data(desc); trace_nvsp_recv(ndev, channel, nvmsg); switch (desc->type) { case VM_PKT_COMP: - netvsc_send_completion(net_device, channel, device, + netvsc_send_completion(ndev, net_device, channel, desc, budget); break; case VM_PKT_DATA_USING_XFER_PAGES: - return netvsc_receive(ndev, net_device, net_device_ctx, - device, channel, desc, nvmsg); + return netvsc_receive(ndev, net_device, channel, + desc, nvmsg); break; case VM_PKT_DATA_INBAND: - netvsc_receive_inband(device, net_device_ctx, nvmsg); + netvsc_receive_inband(ndev, nvmsg); break; default: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index da07ccdf84bf..bef4d55a108c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -43,6 +43,7 @@ #include <net/pkt_sched.h> #include <net/checksum.h> #include <net/ip6_checksum.h> +#include <net/failover.h> #include "hyperv_net.h" @@ -1618,8 +1619,24 @@ static int netvsc_set_ringparam(struct net_device *ndev, return ret; } +static u32 netvsc_get_msglevel(struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + + return ndev_ctx->msg_enable; +} + +static void netvsc_set_msglevel(struct net_device *ndev, u32 val) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + + ndev_ctx->msg_enable = val; +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, + .get_msglevel = netvsc_get_msglevel, + .set_msglevel = netvsc_set_msglevel, .get_link = ethtool_op_get_link, .get_ethtool_stats = netvsc_get_ethtool_stats, .get_sset_count = netvsc_get_sset_count, @@ -1763,46 +1780,6 @@ out_unlock: rtnl_unlock(); } -static struct net_device *get_netvsc_bymac(const u8 *mac) -{ - struct net_device *dev; - - ASSERT_RTNL(); - - for_each_netdev(&init_net, dev) { - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ - - if (ether_addr_equal(mac, dev->perm_addr)) - return dev; - } - - return NULL; -} - -static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) -{ - struct net_device *dev; - - ASSERT_RTNL(); - - for_each_netdev(&init_net, dev) { - struct net_device_context *net_device_ctx; - - if (dev->netdev_ops != &device_ops) - continue; /* not a netvsc device */ - - net_device_ctx = netdev_priv(dev); - if (!rtnl_dereference(net_device_ctx->nvdev)) - continue; /* device is removed */ - - if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) - return dev; /* a match */ - } - - return NULL; -} - /* Called when VF is injecting data into network stack. * Change the associated network device from VF to netvsc. 
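 * [Aside, not part of the patch] This is the standard rx_handler
 * rehoming pattern: the handler is installed on the VF (after this
 * patch, by the failover core through the .slave_handle_frame hook
 * below) with the synthetic netdev as rx_handler_data, so
 * rcu_dereference(skb->dev->rx_handler_data) recovers that netdev,
 * skb->dev is rewritten to it, and RX_HANDLER_ANOTHER asks the stack
 * to re-run receive processing on the new device.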
* note: already called with rcu_read_lock @@ -1825,46 +1802,6 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) return RX_HANDLER_ANOTHER; } -static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) -{ - struct net_device_context *ndev_ctx = netdev_priv(ndev); - int ret; - - ret = netdev_rx_handler_register(vf_netdev, - netvsc_vf_handle_frame, ndev); - if (ret != 0) { - netdev_err(vf_netdev, - "can not register netvsc VF receive handler (err = %d)\n", - ret); - goto rx_handler_failed; - } - - ret = netdev_master_upper_dev_link(vf_netdev, ndev, - NULL, NULL, NULL); - if (ret != 0) { - netdev_err(vf_netdev, - "can not set master device %s (err = %d)\n", - ndev->name, ret); - goto upper_link_failed; - } - - /* set slave flag before open to prevent IPv6 addrconf */ - vf_netdev->flags |= IFF_SLAVE; - - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); - - call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); - - netdev_info(vf_netdev, "joined to %s\n", ndev->name); - return 0; - -upper_link_failed: - netdev_rx_handler_unregister(vf_netdev); -rx_handler_failed: - return ret; -} - static void __netvsc_vf_setup(struct net_device *ndev, struct net_device *vf_netdev) { @@ -1915,85 +1852,95 @@ static void netvsc_vf_setup(struct work_struct *w) rtnl_unlock(); } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_pre_register_vf(struct net_device *vf_netdev, + struct net_device *ndev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; - if (vf_netdev->addr_len != ETH_ALEN) - return NOTIFY_DONE; - - /* - * We will use the MAC address to locate the synthetic interface to - * associate with the VF interface. If we don't find a matching - * synthetic interface, move on. - */ - ndev = get_netvsc_bymac(vf_netdev->perm_addr); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) - return NOTIFY_DONE; + return -ENODEV; - if (netvsc_vf_join(vf_netdev, ndev) != 0) - return NOTIFY_DONE; + return 0; +} + +static int netvsc_register_vf(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); - netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + + netdev_info(vf_netdev, "joined to %s\n", ndev->name); dev_hold(vf_netdev); - rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev); - return NOTIFY_OK; + rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev); + + return 0; } /* VF up/down change detected, schedule to change data path */ -static int netvsc_vf_changed(struct net_device *vf_netdev) +static int netvsc_vf_changed(struct net_device *vf_netdev, + struct net_device *ndev) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; - struct net_device *ndev; bool vf_is_up = netif_running(vf_netdev); - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev) - return NOTIFY_DONE; + return -ENODEV; netvsc_switch_datapath(ndev, vf_is_up); netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? 
"to" : "from", vf_netdev->name); - return NOTIFY_OK; + return 0; } -static int netvsc_unregister_vf(struct net_device *vf_netdev) +static int netvsc_pre_unregister_vf(struct net_device *vf_netdev, + struct net_device *ndev) { - struct net_device *ndev; struct net_device_context *net_device_ctx; - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) - return NOTIFY_DONE; - net_device_ctx = netdev_priv(ndev); cancel_delayed_work_sync(&net_device_ctx->vf_takeover); + return 0; +} + +static int netvsc_unregister_vf(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *net_device_ctx; + + net_device_ctx = netdev_priv(ndev); + netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); - netdev_rx_handler_unregister(vf_netdev); - netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); - return NOTIFY_OK; + return 0; } +static struct failover_ops netvsc_failover_ops = { + .slave_pre_register = netvsc_pre_register_vf, + .slave_register = netvsc_register_vf, + .slave_pre_unregister = netvsc_pre_unregister_vf, + .slave_unregister = netvsc_unregister_vf, + .slave_link_change = netvsc_vf_changed, + .slave_handle_frame = netvsc_vf_handle_frame, +}; + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { @@ -2083,8 +2030,16 @@ static int netvsc_probe(struct hv_device *dev, goto register_failed; } + net_device_ctx->failover = failover_register(net, &netvsc_failover_ops); + if (IS_ERR(net_device_ctx->failover)) { + ret = PTR_ERR(net_device_ctx->failover); + goto err_failover; + } + return ret; +err_failover: + unregister_netdev(net); register_failed: rndis_filter_device_remove(dev, nvdev); rndis_failed: @@ -2125,13 +2080,15 @@ static int netvsc_remove(struct hv_device *dev) rtnl_lock(); vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (vf_netdev) - netvsc_unregister_vf(vf_netdev); + failover_slave_unregister(vf_netdev); if (nvdev) rndis_filter_device_remove(dev, nvdev); unregister_netdevice(net); + failover_unregister(ndev_ctx->failover); + rtnl_unlock(); rcu_read_unlock(); @@ -2158,54 +2115,8 @@ static struct hv_driver netvsc_drv = { .remove = netvsc_remove, }; -/* - * On Hyper-V, every VF interface is matched with a corresponding - * synthetic interface. The synthetic interface is presented first - * to the guest. When the corresponding VF instance is registered, - * we will take care of switching the data path. 
- */ -static int netvsc_netdev_event(struct notifier_block *this, - unsigned long event, void *ptr) -{ - struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); - - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if ((event_dev->priv_flags & IFF_BONDING) && - (event_dev->flags & IFF_MASTER)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); - case NETDEV_UNREGISTER: - return netvsc_unregister_vf(event_dev); - case NETDEV_UP: - case NETDEV_DOWN: - return netvsc_vf_changed(event_dev); - default: - return NOTIFY_DONE; - } -} - -static struct notifier_block netvsc_netdev_notifier = { - .notifier_call = netvsc_netdev_event, -}; - static void __exit netvsc_drv_exit(void) { - unregister_netdevice_notifier(&netvsc_netdev_notifier); vmbus_driver_unregister(&netvsc_drv); } @@ -2225,7 +2136,6 @@ static int __init netvsc_drv_init(void) if (ret) return ret; - register_netdevice_notifier(&netvsc_netdev_notifier); return 0; } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e7ca5b5f39ed..5428bb261102 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -29,6 +29,7 @@ #include <linux/nls.h> #include <linux/vmalloc.h> #include <linux/rtnetlink.h> +#include <linux/ucs2_string.h> #include "hyperv_net.h" #include "netvsc_trace.h" @@ -751,7 +752,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = NETVSC_HASH_KEYLEN; - rssp->kashkey_offset = rssp->indirect_taboffset + + rssp->hashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ @@ -760,7 +761,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev, itab[i] = rdev->rx_table[i]; /* Set hask key values */ - keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); + keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset); memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); @@ -1223,6 +1224,32 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, return ret; } +static void rndis_get_friendly_name(struct net_device *net, + struct rndis_device *rndis_device, + struct netvsc_device *net_device) +{ + ucs2_char_t wname[256]; + unsigned long len; + u8 ifalias[256]; + u32 size; + + size = sizeof(wname); + if (rndis_filter_query_device(rndis_device, net_device, + RNDIS_OID_GEN_FRIENDLY_NAME, + wname, &size) != 0) + return; /* ignore if host does not support */ + + if (size == 0) + return; /* name not set */ + + /* Convert Windows Unicode string to UTF-8 */ + len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias)); + + /* ignore the default value from host */ + if (strcmp(ifalias, "Network Adapter") != 0) + dev_set_alias(net, ifalias, len); +} + struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { @@ -1276,6 +1303,10 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); + /* Get friendly name as ifalias*/ + if (!net->ifalias) + 
rndis_get_friendly_name(net, rndis_device, net_device); + /* Query and set hardware capabilities */ ret = rndis_netdev_set_hwcaps(rndis_device, net_device); if (ret != 0) diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 5f2897ec0edc..d345c61d476c 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -102,7 +102,7 @@ static void ifb_ri_tasklet(unsigned long _txp) if (!skb->tc_from_ingress) { dev_queue_xmit(skb); } else { - skb_pull(skb, skb->mac_len); + skb_pull_rcsum(skb, skb->mac_len); netif_receive_skb(skb); } } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 725f4b4afc6d..adde8fc45588 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -514,6 +514,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; + void *accel_priv = NULL; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; @@ -533,9 +534,14 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) } } + /* For packets that are non-multicast and not bridged we will pass + * the necessary information so that the lowerdev can distinguish + * the source of the packets via the accel_priv value. + */ + accel_priv = vlan->accel_priv; xmit_world: skb->dev = vlan->lowerdev; - return dev_queue_xmit(skb); + return dev_queue_xmit_accel(skb, accel_priv); } static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb) @@ -552,19 +558,14 @@ static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, str static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { + struct macvlan_dev *vlan = netdev_priv(dev); unsigned int len = skb->len; int ret; - struct macvlan_dev *vlan = netdev_priv(dev); if (unlikely(netpoll_tx_running(dev))) return macvlan_netpoll_send_skb(vlan, skb); - if (vlan->fwd_priv) { - skb->dev = vlan->lowerdev; - ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); - } else { - ret = macvlan_queue_xmit(skb, dev); - } + ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *pcpu_stats; @@ -613,26 +614,27 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } - if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { - vlan->fwd_priv = - lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); - - /* If we get a NULL pointer back, or if we get an error - * then we should just fall through to the non accelerated path - */ - if (IS_ERR_OR_NULL(vlan->fwd_priv)) { - vlan->fwd_priv = NULL; - } else - return 0; - } - err = -EBUSY; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; - err = dev_uc_add(lowerdev, dev->dev_addr); - if (err < 0) - goto out; + /* Attempt to populate accel_priv which is used to offload the L2 + * forwarding requests for unicast packets. + */ + if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + vlan->accel_priv = + lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); + + /* If earlier attempt to offload failed, or accel_priv is not + * populated we must add the unicast address to the lower device. 
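 * [Aside, not part of the patch] The contract relied on here:
 * ndo_dfwd_add_station() hands back an opaque station handle (or
 * NULL/ERR_PTR on failure), and macvlan_queue_xmit() above passes that
 * handle down on every transmit via dev_queue_xmit_accel(skb,
 * accel_priv) so the lower driver can steer the frame to the offloaded
 * station's queues; with a NULL handle this degrades to the plain
 * dev_queue_xmit() path, which is why the unicast filter entry is only
 * needed in the non-offloaded case.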
+ */ + if (IS_ERR_OR_NULL(vlan->accel_priv)) { + vlan->accel_priv = NULL; + err = dev_uc_add(lowerdev, dev->dev_addr); + if (err < 0) + goto out; + } + if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) @@ -653,13 +655,14 @@ clear_multi: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); del_unicast: - dev_uc_del(lowerdev, dev->dev_addr); -out: - if (vlan->fwd_priv) { + if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, - vlan->fwd_priv); - vlan->fwd_priv = NULL; + vlan->accel_priv); + vlan->accel_priv = NULL; + } else { + dev_uc_del(lowerdev, dev->dev_addr); } +out: return err; } @@ -668,11 +671,10 @@ static int macvlan_stop(struct net_device *dev) struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (vlan->fwd_priv) { + if (vlan->accel_priv) { lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev, - vlan->fwd_priv); - vlan->fwd_priv = NULL; - return 0; + vlan->accel_priv); + vlan->accel_priv = NULL; } dev_uc_unsync(lowerdev, dev); diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c new file mode 100644 index 000000000000..83f7420ddea5 --- /dev/null +++ b/drivers/net/net_failover.c @@ -0,0 +1,837 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, Intel Corporation. */ + +/* This provides a net_failover interface for paravirtual drivers to + * provide an alternate datapath by exporting APIs to create and + * destroy a upper 'net_failover' netdev. The upper dev manages the + * original paravirtual interface as a 'standby' netdev and uses the + * generic failover infrastructure to register and manage a direct + * attached VF as a 'primary' netdev. This enables live migration of + * a VM with direct attached VF by failing over to the paravirtual + * datapath when the VF is unplugged. + * + * Some of the netdev management routines are based on bond/team driver as + * this driver provides active-backup functionality similar to those drivers. 
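 * [Aside, not part of the patch] The resulting topology, for
 * orientation:
 *
 *          net_failover master (stable name/MAC for userspace)
 *            /                                \
 *    standby slave                       primary slave
 *    (paravirtual netdev)                (direct-attached VF, same MAC)
 *
 * Transmit prefers the primary whenever it is up with carrier; receive
 * on the standby is muted while a primary is active (see
 * net_failover_handle_frame() below).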
+ */ + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/netdevice.h> +#include <linux/netpoll.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <linux/pci.h> +#include <net/sch_generic.h> +#include <uapi/linux/if_arp.h> +#include <net/net_failover.h> + +static bool net_failover_xmit_ready(struct net_device *dev) +{ + return netif_running(dev) && netif_carrier_ok(dev); +} + +static int net_failover_open(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int err; + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + if (primary_dev) { + err = dev_open(primary_dev); + if (err) + goto err_primary_open; + } + + standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (standby_dev) { + err = dev_open(standby_dev); + if (err) + goto err_standby_open; + } + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(dev); + netif_tx_wake_all_queues(dev); + } + + return 0; + +err_standby_open: + dev_close(primary_dev); +err_primary_open: + netif_tx_disable(dev); + return err; +} + +static int net_failover_close(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + netif_tx_disable(dev); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + dev_close(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + dev_close(slave_dev); + + return 0; +} + +static netdev_tx_t net_failover_drop_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + atomic_long_inc(&dev->tx_dropped); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *xmit_dev; + + /* Try xmit via primary netdev followed by standby netdev */ + xmit_dev = rcu_dereference_bh(nfo_info->primary_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) { + xmit_dev = rcu_dereference_bh(nfo_info->standby_dev); + if (!xmit_dev || !net_failover_xmit_ready(xmit_dev)) + return net_failover_drop_xmit(skb, dev); + } + + skb->dev = xmit_dev; + skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + return dev_queue_xmit(skb); +} + +static u16 net_failover_select_queue(struct net_device *dev, + struct sk_buff *skb, void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev; + u16 txq; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + const struct net_device_ops *ops = primary_dev->netdev_ops; + + if (ops->ndo_select_queue) + txq = ops->ndo_select_queue(primary_dev, skb, + accel_priv, fallback); + else + txq = fallback(primary_dev, skb); + + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + return txq; + } + + txq = skb_rx_queue_recorded(skb) ? 
skb_get_rx_queue(skb) : 0; + + /* Save the original txq to restore before passing to the driver */ + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + + if (unlikely(txq >= dev->real_num_tx_queues)) { + do { + txq -= dev->real_num_tx_queues; + } while (txq >= dev->real_num_tx_queues); + } + + return txq; +} + +/* fold stats, assuming all rtnl_link_stats64 fields are u64, but + * that some drivers can provide 32bit values only. + */ +static void net_failover_fold_stats(struct rtnl_link_stats64 *_res, + const struct rtnl_link_stats64 *_new, + const struct rtnl_link_stats64 *_old) +{ + const u64 *new = (const u64 *)_new; + const u64 *old = (const u64 *)_old; + u64 *res = (u64 *)_res; + int i; + + for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { + u64 nv = new[i]; + u64 ov = old[i]; + s64 delta = nv - ov; + + /* detects if this particular field is 32bit only */ + if (((nv | ov) >> 32) == 0) + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. + */ + if (delta > 0) + res[i] += delta; + } +} + +static void net_failover_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + const struct rtnl_link_stats64 *new; + struct rtnl_link_stats64 temp; + struct net_device *slave_dev; + + spin_lock(&nfo_info->stats_lock); + memcpy(stats, &nfo_info->failover_stats, sizeof(*stats)); + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->primary_stats); + memcpy(&nfo_info->primary_stats, new, sizeof(*new)); + } + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + new = dev_get_stats(slave_dev, &temp); + net_failover_fold_stats(stats, new, &nfo_info->standby_stats); + memcpy(&nfo_info->standby_stats, new, sizeof(*new)); + } + + rcu_read_unlock(); + + memcpy(&nfo_info->failover_stats, stats, sizeof(*stats)); + spin_unlock(&nfo_info->stats_lock); +} + +static int net_failover_change_mtu(struct net_device *dev, int new_mtu) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = dev_set_mtu(primary_dev, new_mtu); + if (ret) + return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = dev_set_mtu(standby_dev, new_mtu); + if (ret) { + if (primary_dev) + dev_set_mtu(primary_dev, dev->mtu); + return ret; + } + } + + dev->mtu = new_mtu; + + return 0; +} + +static void net_failover_set_rx_mode(struct net_device *dev) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + rcu_read_lock(); + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) { + dev_uc_sync_multiple(slave_dev, dev); + dev_mc_sync_multiple(slave_dev, dev); + } + + rcu_read_unlock(); +} + +static int net_failover_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + int ret = 0; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + ret = vlan_vid_add(primary_dev, proto, vid); + if (ret) + 
return ret; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + ret = vlan_vid_add(standby_dev, proto, vid); + if (ret) + if (primary_dev) + vlan_vid_del(primary_dev, proto, vid); + } + + return ret; +} + +static int net_failover_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rcu_dereference(nfo_info->primary_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + slave_dev = rcu_dereference(nfo_info->standby_dev); + if (slave_dev) + vlan_vid_del(slave_dev, proto, vid); + + return 0; +} + +static const struct net_device_ops failover_dev_ops = { + .ndo_open = net_failover_open, + .ndo_stop = net_failover_close, + .ndo_start_xmit = net_failover_start_xmit, + .ndo_select_queue = net_failover_select_queue, + .ndo_get_stats64 = net_failover_get_stats, + .ndo_change_mtu = net_failover_change_mtu, + .ndo_set_rx_mode = net_failover_set_rx_mode, + .ndo_vlan_rx_add_vid = net_failover_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = net_failover_vlan_rx_kill_vid, + .ndo_validate_addr = eth_validate_addr, + .ndo_features_check = passthru_features_check, +}; + +#define FAILOVER_NAME "net_failover" +#define FAILOVER_VERSION "0.1" + +static void nfo_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, FAILOVER_NAME, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, FAILOVER_VERSION, sizeof(drvinfo->version)); +} + +static int nfo_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *slave_dev; + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (!slave_dev || !net_failover_xmit_ready(slave_dev)) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.port = PORT_OTHER; + cmd->base.speed = SPEED_UNKNOWN; + + return 0; + } + } + + return __ethtool_get_link_ksettings(slave_dev, cmd); +} + +static const struct ethtool_ops failover_ethtool_ops = { + .get_drvinfo = nfo_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = nfo_ethtool_get_link_ksettings, +}; + +/* Called when slave dev is injecting data into network stack. + * Change the associated network device from lower dev to failover dev. 
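 * [Aside, not part of the patch] rx_handler return values used below:
 * RX_HANDLER_EXACT lets the skb continue on the standby but only to
 * exact-match protocol handlers, so the stack does not see broadcast
 * traffic twice while a primary is active; RX_HANDLER_ANOTHER signals
 * that skb->dev was rewritten and receive processing should restart on
 * the failover master.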
+ * note: already called with rcu_read_lock + */ +static rx_handler_result_t net_failover_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *dev = rcu_dereference(skb->dev->rx_handler_data); + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + standby_dev = rcu_dereference(nfo_info->standby_dev); + + if (primary_dev && skb->dev == standby_dev) + return RX_HANDLER_EXACT; + + skb->dev = dev; + + return RX_HANDLER_ANOTHER; +} + +static void net_failover_compute_features(struct net_device *dev) +{ + netdev_features_t vlan_features = FAILOVER_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; + netdev_features_t enc_features = FAILOVER_ENC_FEATURES; + unsigned short max_hard_header_len = ETH_HLEN; + unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM; + struct net_failover_info *nfo_info = netdev_priv(dev); + struct net_device *primary_dev, *standby_dev; + + primary_dev = rcu_dereference(nfo_info->primary_dev); + if (primary_dev) { + vlan_features = + netdev_increment_features(vlan_features, + primary_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + primary_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= primary_dev->priv_flags; + if (primary_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = primary_dev->hard_header_len; + } + + standby_dev = rcu_dereference(nfo_info->standby_dev); + if (standby_dev) { + vlan_features = + netdev_increment_features(vlan_features, + standby_dev->vlan_features, + FAILOVER_VLAN_FEATURES); + enc_features = + netdev_increment_features(enc_features, + standby_dev->hw_enc_features, + FAILOVER_ENC_FEATURES); + + dst_release_flag &= standby_dev->priv_flags; + if (standby_dev->hard_header_len > max_hard_header_len) + max_hard_header_len = standby_dev->hard_header_len; + } + + dev->vlan_features = vlan_features; + dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + dev->hard_header_len = max_hard_header_len; + + dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + if (dst_release_flag == (IFF_XMIT_DST_RELEASE | + IFF_XMIT_DST_RELEASE_PERM)) + dev->priv_flags |= IFF_XMIT_DST_RELEASE; + + netdev_change_features(dev); +} + +static void net_failover_lower_state_changed(struct net_device *slave_dev, + struct net_device *primary_dev, + struct net_device *standby_dev) +{ + struct netdev_lag_lower_state_info info; + + if (netif_carrier_ok(slave_dev)) + info.link_up = true; + else + info.link_up = false; + + if (slave_dev == primary_dev) { + if (netif_running(primary_dev)) + info.tx_enabled = true; + else + info.tx_enabled = false; + } else { + if ((primary_dev && netif_running(primary_dev)) || + (!netif_running(standby_dev))) + info.tx_enabled = false; + else + info.tx_enabled = true; + } + + netdev_lower_state_changed(slave_dev, &info); +} + +static int net_failover_slave_pre_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby ? 
standby_dev : primary_dev) { + netdev_err(failover_dev, "%s attempting to register as slave dev when %s already present\n", + slave_dev->name, + slave_is_standby ? "standby" : "primary"); + return -EINVAL; + } + + /* We want to allow only a direct attached VF device as a primary + * netdev. As there is no easy way to check for a VF device, restrict + * this to a pci device. + */ + if (!slave_is_standby && (!slave_dev->dev.parent || + !dev_is_pci(slave_dev->dev.parent))) + return -EINVAL; + + if (failover_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(failover_dev)) { + netdev_err(failover_dev, "Device %s is VLAN challenged and failover device has VLAN set up\n", + failover_dev->name); + return -EINVAL; + } + + return 0; +} + +static int net_failover_slave_register(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + u32 orig_mtu; + int err; + + /* Align MTU of slave with failover dev */ + orig_mtu = slave_dev->mtu; + err = dev_set_mtu(slave_dev, failover_dev->mtu); + if (err) { + netdev_err(failover_dev, "unable to change mtu of %s to %u register failed\n", + slave_dev->name, failover_dev->mtu); + goto done; + } + + dev_hold(slave_dev); + + if (netif_running(failover_dev)) { + err = dev_open(slave_dev); + if (err && (err != -EBUSY)) { + netdev_err(failover_dev, "Opening slave %s failed err:%d\n", + slave_dev->name, err); + goto err_dev_open; + } + } + + netif_addr_lock_bh(failover_dev); + dev_uc_sync_multiple(slave_dev, failover_dev); + dev_uc_sync_multiple(slave_dev, failover_dev); + netif_addr_unlock_bh(failover_dev); + + err = vlan_vids_add_by_dev(slave_dev, failover_dev); + if (err) { + netdev_err(failover_dev, "Failed to add vlan ids to device %s err:%d\n", + slave_dev->name, err); + goto err_vlan_add; + } + + nfo_info = netdev_priv(failover_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + + if (slave_is_standby) { + rcu_assign_pointer(nfo_info->standby_dev, slave_dev); + standby_dev = slave_dev; + dev_get_stats(standby_dev, &nfo_info->standby_stats); + } else { + rcu_assign_pointer(nfo_info->primary_dev, slave_dev); + primary_dev = slave_dev; + dev_get_stats(primary_dev, &nfo_info->primary_stats); + failover_dev->min_mtu = slave_dev->min_mtu; + failover_dev->max_mtu = slave_dev->max_mtu; + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + net_failover_compute_features(failover_dev); + + call_netdevice_notifiers(NETDEV_JOIN, slave_dev); + + netdev_info(failover_dev, "failover %s slave:%s registered\n", + slave_is_standby ? 
"standby" : "primary", slave_dev->name); + + return 0; + +err_vlan_add: + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); +err_dev_open: + dev_put(slave_dev); + dev_set_mtu(slave_dev, orig_mtu); +done: + return err; +} + +static int net_failover_slave_pre_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + return 0; +} + +static int net_failover_slave_unregister(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *standby_dev, *primary_dev; + struct net_failover_info *nfo_info; + bool slave_is_standby; + + nfo_info = netdev_priv(failover_dev); + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + vlan_vids_del_by_dev(slave_dev, failover_dev); + dev_uc_unsync(slave_dev, failover_dev); + dev_mc_unsync(slave_dev, failover_dev); + dev_close(slave_dev); + + nfo_info = netdev_priv(failover_dev); + dev_get_stats(failover_dev, &nfo_info->failover_stats); + + slave_is_standby = slave_dev->dev.parent == failover_dev->dev.parent; + if (slave_is_standby) { + RCU_INIT_POINTER(nfo_info->standby_dev, NULL); + } else { + RCU_INIT_POINTER(nfo_info->primary_dev, NULL); + if (standby_dev) { + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + } + } + + dev_put(slave_dev); + + net_failover_compute_features(failover_dev); + + netdev_info(failover_dev, "failover %s slave:%s unregistered\n", + slave_is_standby ? "standby" : "primary", slave_dev->name); + + return 0; +} + +static int net_failover_slave_link_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + if ((primary_dev && net_failover_xmit_ready(primary_dev)) || + (standby_dev && net_failover_xmit_ready(standby_dev))) { + netif_carrier_on(failover_dev); + netif_tx_wake_all_queues(failover_dev); + } else { + dev_get_stats(failover_dev, &nfo_info->failover_stats); + netif_carrier_off(failover_dev); + netif_tx_stop_all_queues(failover_dev); + } + + net_failover_lower_state_changed(slave_dev, primary_dev, standby_dev); + + return 0; +} + +static int net_failover_slave_name_change(struct net_device *slave_dev, + struct net_device *failover_dev) +{ + struct net_device *primary_dev, *standby_dev; + struct net_failover_info *nfo_info; + + nfo_info = netdev_priv(failover_dev); + + primary_dev = rtnl_dereference(nfo_info->primary_dev); + standby_dev = rtnl_dereference(nfo_info->standby_dev); + + if (slave_dev != primary_dev && slave_dev != standby_dev) + return -ENODEV; + + /* We need to bring up the slave after the rename by udev in case + * open failed with EBUSY when it was registered. 
+ */ + dev_open(slave_dev); + + return 0; +} + +static struct failover_ops net_failover_ops = { + .slave_pre_register = net_failover_slave_pre_register, + .slave_register = net_failover_slave_register, + .slave_pre_unregister = net_failover_slave_pre_unregister, + .slave_unregister = net_failover_slave_unregister, + .slave_link_change = net_failover_slave_link_change, + .slave_name_change = net_failover_slave_name_change, + .slave_handle_frame = net_failover_handle_frame, +}; + +/** + * net_failover_create - Create and register a failover instance + * + * @dev: standby netdev + * + * Creates a failover netdev and registers a failover instance for a standby + * netdev. Used by paravirtual drivers that use 3-netdev model. + * The failover netdev acts as a master device and controls 2 slave devices - + * the original standby netdev and a VF netdev with the same MAC gets + * registered as primary netdev. + * + * Return: pointer to failover instance + */ +struct failover *net_failover_create(struct net_device *standby_dev) +{ + struct device *dev = standby_dev->dev.parent; + struct net_device *failover_dev; + struct failover *failover; + int err; + + /* Alloc at least 2 queues, for now we are going with 16 assuming + * that VF devices being enslaved won't have too many queues. + */ + failover_dev = alloc_etherdev_mq(sizeof(struct net_failover_info), 16); + if (!failover_dev) { + dev_err(dev, "Unable to allocate failover_netdev!\n"); + return ERR_PTR(-ENOMEM); + } + + dev_net_set(failover_dev, dev_net(standby_dev)); + SET_NETDEV_DEV(failover_dev, dev); + + failover_dev->netdev_ops = &failover_dev_ops; + failover_dev->ethtool_ops = &failover_ethtool_ops; + + /* Initialize the device options */ + failover_dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; + failover_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | + IFF_TX_SKB_SHARING); + + /* don't acquire failover netdev's netif_tx_lock when transmitting */ + failover_dev->features |= NETIF_F_LLTX; + + /* Don't allow failover devices to change network namespaces. */ + failover_dev->features |= NETIF_F_NETNS_LOCAL; + + failover_dev->hw_features = FAILOVER_VLAN_FEATURES | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + failover_dev->features |= failover_dev->hw_features; + + memcpy(failover_dev->dev_addr, standby_dev->dev_addr, + failover_dev->addr_len); + + failover_dev->min_mtu = standby_dev->min_mtu; + failover_dev->max_mtu = standby_dev->max_mtu; + + err = register_netdev(failover_dev); + if (err) { + dev_err(dev, "Unable to register failover_dev!\n"); + goto err_register_netdev; + } + + netif_carrier_off(failover_dev); + + failover = failover_register(failover_dev, &net_failover_ops); + if (IS_ERR(failover)) + goto err_failover_register; + + return failover; + +err_failover_register: + unregister_netdev(failover_dev); +err_register_netdev: + free_netdev(failover_dev); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(net_failover_create); + +/** + * net_failover_destroy - Destroy a failover instance + * + * @failover: pointer to failover instance + * + * Unregisters any slave netdevs associated with the failover instance by + * calling failover_slave_unregister(). + * unregisters the failover instance itself and finally frees the failover + * netdev. Used by paravirtual drivers that use 3-netdev model. 
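 * [Aside, not part of the patch] A sketch of the consumer flow for a
 * paravirtual driver; 'pv' and its fields are hypothetical:
 *
 *	pv->failover = net_failover_create(pv->ndev);
 *	if (IS_ERR(pv->failover))
 *		return PTR_ERR(pv->failover);
 *	...
 *	net_failover_destroy(pv->failover);
 *
 * net_failover_create() turns pv->ndev into the standby; the VF
 * carrying the same MAC later attaches as the primary automatically
 * through the failover core's netdev notifier.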
+ * + */ +void net_failover_destroy(struct failover *failover) +{ + struct net_failover_info *nfo_info; + struct net_device *failover_dev; + struct net_device *slave_dev; + + if (!failover) + return; + + failover_dev = rcu_dereference(failover->failover_dev); + nfo_info = netdev_priv(failover_dev); + + netif_device_detach(failover_dev); + + rtnl_lock(); + + slave_dev = rtnl_dereference(nfo_info->primary_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + slave_dev = rtnl_dereference(nfo_info->standby_dev); + if (slave_dev) + failover_slave_unregister(slave_dev); + + failover_unregister(failover); + + unregister_netdevice(failover_dev); + + rtnl_unlock(); + + free_netdev(failover_dev); +} +EXPORT_SYMBOL_GPL(net_failover_destroy); + +static __init int +net_failover_init(void) +{ + return 0; +} +module_init(net_failover_init); + +static __exit +void net_failover_exit(void) +{ +} +module_exit(net_failover_exit); + +MODULE_DESCRIPTION("Failover driver for Paravirtual drivers"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c index bef7db5d129a..ba663e5af168 100644 --- a/drivers/net/netdevsim/devlink.c +++ b/drivers/net/netdevsim/devlink.c @@ -147,7 +147,8 @@ out: return err; } -static int nsim_devlink_reload(struct devlink *devlink) +static int nsim_devlink_reload(struct devlink *devlink, + struct netlink_ext_ack *extack) { enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, @@ -162,7 +163,7 @@ static int nsim_devlink_reload(struct devlink *devlink) err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(net, res_ids[i], val); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } @@ -180,7 +181,7 @@ static void nsim_devlink_net_reset(struct net *net) int i; for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { - if (nsim_fib_set_max(net, res_ids[i], (u64)-1)) { + if (nsim_fib_set_max(net, res_ids[i], (u64)-1, NULL)) { pr_err("Failed to reset limit for resource %u\n", res_ids[i]); } diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c index 9bfe9e151e13..f61d094746c0 100644 --- a/drivers/net/netdevsim/fib.c +++ b/drivers/net/netdevsim/fib.c @@ -64,7 +64,8 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max) return max ? 
entry->max : entry->num; } -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val) +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, + struct netlink_ext_ack *extack) { struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id); struct nsim_fib_entry *entry; @@ -90,10 +91,12 @@ int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val) /* not allowing a new max to be less than current occupancy * --> no means of evicting entries */ - if (val < entry->num) + if (val < entry->num) { + NL_SET_ERR_MSG_MOD(extack, "New size is less than current occupancy"); err = -EINVAL; - else + } else { entry->max = val; + } return err; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 3a8581af3b85..8ca50b72c328 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -126,7 +126,8 @@ void nsim_devlink_exit(void); int nsim_fib_init(void); void nsim_fib_exit(void); u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, + struct netlink_ext_ack *extack); #else static inline int nsim_devlink_setup(struct netdevsim *ns) { diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bdfbabb86ee0..343989f9f9d9 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -118,11 +118,18 @@ config MDIO_I2C config MDIO_MOXART tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART + depends on ARCH_MOXART || COMPILE_TEST help This driver supports the MDIO interface found in the network interface units of the MOXA ART SoC +config MDIO_MSCC_MIIM + tristate "Microsemi MIIM interface support" + depends on HAS_IOMEM + help + This driver supports the MIIM (MDIO) interface found in the network + switches of the Microsemi SoCs + config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" depends on 64BIT @@ -135,7 +142,7 @@ config MDIO_OCTEON config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" - depends on ARCH_SUNXI + depends on ARCH_SUNXI || COMPILE_TEST help This driver supports the MDIO interface found in the network interface units of the Allwinner SoC that have an EMAC (A10, @@ -218,6 +225,12 @@ config AQUANTIA_PHY ---help--- Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 +config ASIX_PHY + tristate "Asix PHYs" + help + Currently supports the Asix Electronics PHY found in the X-Surf 100 + AX88796B package. + config AT803X_PHY tristate "AT803X PHYs" ---help--- @@ -285,6 +298,11 @@ config DP83822_PHY ---help--- Supports the DP83822 PHY. +config DP83TC811_PHY + tristate "Texas Instruments DP83TC811 PHY" + ---help--- + Supports the DP83TC811 PHY. + config DP83848_PHY tristate "Texas Instruments DP83848 PHY" ---help--- @@ -354,6 +372,11 @@ config MICROCHIP_PHY help Supports the LAN88XX PHYs. +config MICROCHIP_T1_PHY + tristate "Microchip T1 PHYs" + ---help--- + Supports the LAN87XX PHYs. 
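The three PHY entries added above (ASIX_PHY, DP83TC811_PHY and MICROCHIP_T1_PHY) each gate a small module built on the same phylib boilerplate, instantiated by the new files further down in this patch. As orientation only, here is a minimal sketch of that pattern, assuming the phylib APIs this patch already relies on (module_phy_driver() and the genphy_* helpers); PHY_ID_MYPHY, the driver name and the ID value are hypothetical placeholders, not parts named in this patch:

// SPDX-License-Identifier: GPL-2.0
/* Minimal phylib driver sketch; hypothetical device, for orientation only. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/phy.h>

#define PHY_ID_MYPHY 0x00112230 /* placeholder OUI/model, not a real part */

static struct phy_driver myphy_driver[] = { {
	.phy_id = PHY_ID_MYPHY,
	.phy_id_mask = 0xfffffff0, /* match any silicon revision */
	.name = "Example 10/100 PHY",
	.features = PHY_BASIC_FEATURES,
	.config_init = genphy_config_init,
	.config_aneg = genphy_config_aneg,
	.suspend = genphy_suspend,
	.resume = genphy_resume,
} };

module_phy_driver(myphy_driver);

/* Lets the MDIO core autoload the module when a matching PHY ID is
 * read from a device on the bus.
 */
static struct mdio_device_id __maybe_unused myphy_tbl[] = {
	{ PHY_ID_MYPHY, 0xfffffff0 },
	{ }
};
MODULE_DEVICE_TABLE(mdio, myphy_tbl);

MODULE_DESCRIPTION("Example PHY driver sketch");
MODULE_LICENSE("GPL");

asix.c and microchip_t1.c below are real instances of exactly this shape, differing mainly in the .soft_reset and interrupt callbacks they add on top.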
+ config MICROSEMI_PHY tristate "Microsemi PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 01acbcb2c798..5805c0b7d60e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o @@ -45,6 +46,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o +obj-$(CONFIG_ASIX_PHY) += asix.o obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o @@ -57,6 +59,7 @@ obj-$(CONFIG_CORTINA_PHY) += cortina.o obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_DP83822_PHY) += dp83822.o +obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_FIXED_PHY) += fixed_phy.o @@ -70,6 +73,7 @@ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o +obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o obj-$(CONFIG_MICROSEMI_PHY) += mscc.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c new file mode 100644 index 000000000000..8ebe7f5484ae --- /dev/null +++ b/drivers/net/phy/asix.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Driver for Asix PHYs + * + * Author: Michael Schmitz <schmitzmic@gmail.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#define PHY_ID_ASIX_AX88796B 0x003b1841 + +MODULE_DESCRIPTION("Asix PHY driver"); +MODULE_AUTHOR("Michael Schmitz <schmitzmic@gmail.com>"); +MODULE_LICENSE("GPL"); + +/** + * asix_soft_reset - software reset the PHY via BMCR_RESET bit + * @phydev: target phy_device struct + * + * Description: Perform a software PHY reset using the standard + * BMCR_RESET bit and poll for the reset bit to be cleared. + * Toggle BMCR_RESET bit off to accommodate broken AX88796B PHY implementation + * such as used on the Individual Computers' X-Surf 100 Zorro card. 
+ * + * Returns: 0 on success, < 0 on failure + */ +static int asix_soft_reset(struct phy_device *phydev) +{ + int ret; + + /* Asix PHY won't reset unless reset bit toggles */ + ret = phy_write(phydev, MII_BMCR, 0); + if (ret < 0) + return ret; + + return genphy_soft_reset(phydev); +} + +static struct phy_driver asix_driver[] = { { + .phy_id = PHY_ID_ASIX_AX88796B, + .name = "Asix Electronics AX88796B", + .phy_id_mask = 0xfffffff0, + .features = PHY_BASIC_FEATURES, + .soft_reset = asix_soft_reset, +} }; + +module_phy_driver(asix_driver); + +static struct mdio_device_id __maybe_unused asix_tbl[] = { + { PHY_ID_ASIX_AX88796B, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, asix_tbl); diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index d5e0833d69b9..e10e7b54ec4b 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -346,10 +346,6 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data) } EXPORT_SYMBOL_GPL(bcm_phy_get_strings); -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif - /* Caller is supposed to provide appropriate storage for the library code to * access the shadow copy */ @@ -362,7 +358,7 @@ static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow, val = phy_read(phydev, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = U64_MAX; } else { val >>= stat.shift; val = val & ((1 << stat.bits) - 1); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index f9c25912eb98..e86ea105c802 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -54,6 +54,8 @@ static int bcm54210e_config_init(struct phy_device *phydev) static int bcm54612e_config_init(struct phy_device *phydev) { + int reg; + /* Clear TX internal delay unless requested. */ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) { @@ -65,8 +67,6 @@ static int bcm54612e_config_init(struct phy_device *phydev) /* Clear RX internal delay unless requested. */ if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) { - u16 reg; - reg = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC); /* Disable RXD to RXC delay (default set) */ @@ -77,6 +77,18 @@ static int bcm54612e_config_init(struct phy_device *phydev) MII_BCM54XX_AUXCTL_MISC_WREN | reg); } + /* Enable CLK125 MUX on LED4 if ref clock is enabled. 
*/ + if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) { + int err; + + reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0); + err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0, + BCM54612E_LED4_CLK125OUT_EN | reg); + + if (err < 0) + return err; + } + return 0; } diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c new file mode 100644 index 000000000000..081d99aa3985 --- /dev/null +++ b/drivers/net/phy/dp83tc811.c @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for the Texas Instruments DP83TC811 PHY + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * + */ + +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/phy.h> +#include <linux/netdevice.h> + +#define DP83TC811_PHY_ID 0x2000a253 +#define DP83811_DEVADDR 0x1f + +#define MII_DP83811_SGMII_CTRL 0x09 +#define MII_DP83811_INT_STAT1 0x12 +#define MII_DP83811_INT_STAT2 0x13 +#define MII_DP83811_RESET_CTRL 0x1f + +#define DP83811_HW_RESET BIT(15) +#define DP83811_SW_RESET BIT(14) + +/* INT_STAT1 bits */ +#define DP83811_RX_ERR_HF_INT_EN BIT(0) +#define DP83811_MS_TRAINING_INT_EN BIT(1) +#define DP83811_ANEG_COMPLETE_INT_EN BIT(2) +#define DP83811_ESD_EVENT_INT_EN BIT(3) +#define DP83811_WOL_INT_EN BIT(4) +#define DP83811_LINK_STAT_INT_EN BIT(5) +#define DP83811_ENERGY_DET_INT_EN BIT(6) +#define DP83811_LINK_QUAL_INT_EN BIT(7) + +/* INT_STAT2 bits */ +#define DP83811_JABBER_DET_INT_EN BIT(0) +#define DP83811_POLARITY_INT_EN BIT(1) +#define DP83811_SLEEP_MODE_INT_EN BIT(2) +#define DP83811_OVERTEMP_INT_EN BIT(3) +#define DP83811_OVERVOLTAGE_INT_EN BIT(6) +#define DP83811_UNDERVOLTAGE_INT_EN BIT(7) + +#define MII_DP83811_RXSOP1 0x04a5 +#define MII_DP83811_RXSOP2 0x04a6 +#define MII_DP83811_RXSOP3 0x04a7 + +/* WoL Registers */ +#define MII_DP83811_WOL_CFG 0x04a0 +#define MII_DP83811_WOL_STAT 0x04a1 +#define MII_DP83811_WOL_DA1 0x04a2 +#define MII_DP83811_WOL_DA2 0x04a3 +#define MII_DP83811_WOL_DA3 0x04a4 + +/* WoL bits */ +#define DP83811_WOL_MAGIC_EN BIT(0) +#define DP83811_WOL_SECURE_ON BIT(5) +#define DP83811_WOL_EN BIT(7) +#define DP83811_WOL_INDICATION_SEL BIT(8) +#define DP83811_WOL_CLR_INDICATION BIT(11) + +/* SGMII CTRL bits */ +#define DP83811_TDR_AUTO BIT(8) +#define DP83811_SGMII_EN BIT(12) +#define DP83811_SGMII_AUTO_NEG_EN BIT(13) +#define DP83811_SGMII_TX_ERR_DIS BIT(14) +#define DP83811_SGMII_SOFT_RESET BIT(15) + +static int dp83811_ack_interrupt(struct phy_device *phydev) +{ + int err; + + err = phy_read(phydev, MII_DP83811_INT_STAT1); + if (err < 0) + return err; + + err = phy_read(phydev, MII_DP83811_INT_STAT2); + if (err < 0) + return err; + + return 0; +} + +static int dp83811_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + struct net_device *ndev = phydev->attached_dev; + const u8 *mac; + u16 value; + + if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { + mac = (const u8 *)ndev->dev_addr; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + /* MAC addresses start with byte 5, but stored in mac[0]. 
+ * 811 PHYs store bytes 4|5, 2|3, 0|1 + */ + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA1, + (mac[1] << 8) | mac[0]); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA2, + (mac[3] << 8) | mac[2]); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_DA3, + (mac[5] << 8) | mac[4]); + + value = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG); + if (wol->wolopts & WAKE_MAGIC) + value |= DP83811_WOL_MAGIC_EN; + else + value &= ~DP83811_WOL_MAGIC_EN; + + if (wol->wolopts & WAKE_MAGICSECURE) { + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP1, + (wol->sopass[1] << 8) | wol->sopass[0]); + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP2, + (wol->sopass[3] << 8) | wol->sopass[2]); + phy_write_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP3, + (wol->sopass[5] << 8) | wol->sopass[4]); + value |= DP83811_WOL_SECURE_ON; + } else { + value &= ~DP83811_WOL_SECURE_ON; + } + + value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL | + DP83811_WOL_CLR_INDICATION); + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); + } else { + value = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_WOL_CFG); + value &= ~DP83811_WOL_EN; + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); + } + + return 0; +} + +static void dp83811_get_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + u16 sopass_val; + int value; + + wol->supported = (WAKE_MAGIC | WAKE_MAGICSECURE); + wol->wolopts = 0; + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + if (value & DP83811_WOL_MAGIC_EN) + wol->wolopts |= WAKE_MAGIC; + + if (value & DP83811_WOL_SECURE_ON) { + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP1); + wol->sopass[0] = (sopass_val & 0xff); + wol->sopass[1] = (sopass_val >> 8); + + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP2); + wol->sopass[2] = (sopass_val & 0xff); + wol->sopass[3] = (sopass_val >> 8); + + sopass_val = phy_read_mmd(phydev, DP83811_DEVADDR, + MII_DP83811_RXSOP3); + wol->sopass[4] = (sopass_val & 0xff); + wol->sopass[5] = (sopass_val >> 8); + + wol->wolopts |= WAKE_MAGICSECURE; + } + + /* WoL is not enabled so set wolopts to 0 */ + if (!(value & DP83811_WOL_EN)) + wol->wolopts = 0; +} + +static int dp83811_config_intr(struct phy_device *phydev) +{ + int misr_status, err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + misr_status = phy_read(phydev, MII_DP83811_INT_STAT1); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_RX_ERR_HF_INT_EN | + DP83811_MS_TRAINING_INT_EN | + DP83811_ANEG_COMPLETE_INT_EN | + DP83811_ESD_EVENT_INT_EN | + DP83811_WOL_INT_EN | + DP83811_LINK_STAT_INT_EN | + DP83811_ENERGY_DET_INT_EN | + DP83811_LINK_QUAL_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT1, misr_status); + if (err < 0) + return err; + + misr_status = phy_read(phydev, MII_DP83811_INT_STAT2); + if (misr_status < 0) + return misr_status; + + misr_status |= (DP83811_JABBER_DET_INT_EN | + DP83811_POLARITY_INT_EN | + DP83811_SLEEP_MODE_INT_EN | + DP83811_OVERTEMP_INT_EN | + DP83811_OVERVOLTAGE_INT_EN | + DP83811_UNDERVOLTAGE_INT_EN); + + err = phy_write(phydev, MII_DP83811_INT_STAT2, misr_status); + + } else { + err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_DP83811_INT_STAT2, 0); + } + + return err; +} + +static int dp83811_config_aneg(struct phy_device *phydev) +{ + int value, err; + + if (phydev->interface == 
PHY_INTERFACE_MODE_SGMII) { + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); + if (phydev->autoneg == AUTONEG_ENABLE) { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (DP83811_SGMII_AUTO_NEG_EN | value)); + if (err < 0) + return err; + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_AUTO_NEG_EN & value)); + if (err < 0) + return err; + } + } + + return genphy_config_aneg(phydev); +} + +static int dp83811_config_init(struct phy_device *phydev) +{ + int value, err; + + err = genphy_config_init(phydev); + if (err < 0) + return err; + + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + value = phy_read(phydev, MII_DP83811_SGMII_CTRL); + if (!(value & DP83811_SGMII_EN)) { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (DP83811_SGMII_EN | value)); + if (err < 0) + return err; + } else { + err = phy_write(phydev, MII_DP83811_SGMII_CTRL, + (~DP83811_SGMII_EN & value)); + if (err < 0) + return err; + } + } + + value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN; + + return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, + value); +} + +static int dp83811_phy_reset(struct phy_device *phydev) +{ + int err; + + err = phy_write(phydev, MII_DP83811_RESET_CTRL, DP83811_HW_RESET); + if (err < 0) + return err; + + return 0; +} + +static int dp83811_suspend(struct phy_device *phydev) +{ + int value; + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + if (!(value & DP83811_WOL_EN)) + genphy_suspend(phydev); + + return 0; +} + +static int dp83811_resume(struct phy_device *phydev) +{ + int value; + + genphy_resume(phydev); + + value = phy_read_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG); + + phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG, value | + DP83811_WOL_CLR_INDICATION); + + return 0; +} + +static struct phy_driver dp83811_driver[] = { + { + .phy_id = DP83TC811_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "TI DP83TC811", + .features = PHY_BASIC_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = dp83811_config_init, + .config_aneg = dp83811_config_aneg, + .soft_reset = dp83811_phy_reset, + .get_wol = dp83811_get_wol, + .set_wol = dp83811_set_wol, + .ack_interrupt = dp83811_ack_interrupt, + .config_intr = dp83811_config_intr, + .suspend = dp83811_suspend, + .resume = dp83811_resume, + }, +}; +module_phy_driver(dp83811_driver); + +static struct mdio_device_id __maybe_unused dp83811_tbl[] = { + { DP83TC811_PHY_ID, 0xfffffff0 }, + { }, +}; +MODULE_DEVICE_TABLE(mdio, dp83811_tbl); + +MODULE_DESCRIPTION("Texas Instruments DP83TC811 PHY driver"); +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 25e2a099b71c..b8f57e9b9379 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1482,9 +1482,6 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; @@ -1494,7 +1491,7 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) val = phy_read_paged(phydev, stat.page, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = U64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 403b085f0a89..15352f987bdf 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ 
b/drivers/net/phy/mdio-bitbang.c @@ -205,14 +205,6 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) return 0; } -static int mdiobb_reset(struct mii_bus *bus) -{ - struct mdiobb_ctrl *ctrl = bus->priv; - if (ctrl->reset) - ctrl->reset(bus); - return 0; -} - struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; @@ -225,7 +217,6 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) bus->read = mdiobb_read; bus->write = mdiobb_write; - bus->reset = mdiobb_reset; bus->priv = ctrl; return bus; diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 1861f387820d..863496fa5d13 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -30,17 +30,20 @@ void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, struct mdio_board_info *bi)) { struct mdio_board_entry *be; + struct mdio_board_entry *tmp; struct mdio_board_info *bi; int ret; mutex_lock(&mdio_board_lock); - list_for_each_entry(be, &mdio_board_list, list) { + list_for_each_entry_safe(be, tmp, &mdio_board_list, list) { bi = &be->board_info; if (strcmp(bus->id, bi->bus_id)) continue; + mutex_unlock(&mdio_board_lock); ret = cb(bus, bi); + mutex_lock(&mdio_board_lock); if (ret) continue; diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 4333c6e14742..4e4c8daf44c3 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -24,8 +24,10 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/mdio-bitbang.h> +#include <linux/mdio-gpio.h> #include <linux/gpio.h> -#include <linux/platform_data/mdio-gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_gpio.h> #include <linux/of_mdio.h> @@ -35,37 +37,22 @@ struct mdio_gpio_info { struct gpio_desc *mdc, *mdio, *mdo; }; -static void *mdio_gpio_of_get_data(struct platform_device *pdev) +static int mdio_gpio_get_data(struct device *dev, + struct mdio_gpio_info *bitbang) { - struct device_node *np = pdev->dev.of_node; - struct mdio_gpio_platform_data *pdata; - enum of_gpio_flags flags; - int ret; - - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - ret = of_get_gpio_flags(np, 0, &flags); - if (ret < 0) - return NULL; - - pdata->mdc = ret; - pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW; - - ret = of_get_gpio_flags(np, 1, &flags); - if (ret < 0) - return NULL; - pdata->mdio = ret; - pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW; - - ret = of_get_gpio_flags(np, 2, &flags); - if (ret > 0) { - pdata->mdo = ret; - pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW; - } - - return pdata; + bitbang->mdc = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDC, + GPIOD_OUT_LOW); + if (IS_ERR(bitbang->mdc)) + return PTR_ERR(bitbang->mdc); + + bitbang->mdio = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDIO, + GPIOD_IN); + if (IS_ERR(bitbang->mdio)) + return PTR_ERR(bitbang->mdio); + + bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, MDIO_GPIO_MDO, + GPIOD_OUT_LOW); + return PTR_ERR_OR_ZERO(bitbang->mdo); } static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) @@ -125,78 +112,28 @@ static const struct mdiobb_ops mdio_gpio_ops = { }; static struct mii_bus *mdio_gpio_bus_init(struct device *dev, - struct mdio_gpio_platform_data *pdata, + struct mdio_gpio_info *bitbang, int bus_id) { struct mii_bus *new_bus; - struct mdio_gpio_info *bitbang; - int i; - int mdc, mdio, mdo; - unsigned long mdc_flags = GPIOF_OUT_INIT_LOW; - unsigned 
long mdio_flags = GPIOF_DIR_IN; - unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH; - - bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL); - if (!bitbang) - goto out; bitbang->ctrl.ops = &mdio_gpio_ops; - bitbang->ctrl.reset = pdata->reset; - mdc = pdata->mdc; - bitbang->mdc = gpio_to_desc(mdc); - if (pdata->mdc_active_low) - mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW; - mdio = pdata->mdio; - bitbang->mdio = gpio_to_desc(mdio); - if (pdata->mdio_active_low) - mdio_flags |= GPIOF_ACTIVE_LOW; - mdo = pdata->mdo; - if (mdo) { - bitbang->mdo = gpio_to_desc(mdo); - if (pdata->mdo_active_low) - mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW; - } new_bus = alloc_mdio_bitbang(&bitbang->ctrl); if (!new_bus) - goto out; - - new_bus->name = "GPIO Bitbanged MDIO", + return NULL; - new_bus->phy_mask = pdata->phy_mask; - new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; - memcpy(new_bus->irq, pdata->irqs, sizeof(new_bus->irq)); + new_bus->name = "GPIO Bitbanged MDIO"; new_bus->parent = dev; - if (new_bus->phy_mask == ~0) - goto out_free_bus; - - for (i = 0; i < PHY_MAX_ADDR; i++) - if (!new_bus->irq[i]) - new_bus->irq[i] = PHY_POLL; - if (bus_id != -1) snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); - if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc")) - goto out_free_bus; - - if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio")) - goto out_free_bus; - - if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo")) - goto out_free_bus; - dev_set_drvdata(dev, new_bus); return new_bus; - -out_free_bus: - free_mdio_bitbang(new_bus); -out: - return NULL; } static void mdio_gpio_bus_deinit(struct device *dev) @@ -216,34 +153,33 @@ static void mdio_gpio_bus_destroy(struct device *dev) static int mdio_gpio_probe(struct platform_device *pdev) { - struct mdio_gpio_platform_data *pdata; + struct mdio_gpio_info *bitbang; struct mii_bus *new_bus; int ret, bus_id; + bitbang = devm_kzalloc(&pdev->dev, sizeof(*bitbang), GFP_KERNEL); + if (!bitbang) + return -ENOMEM; + + ret = mdio_gpio_get_data(&pdev->dev, bitbang); + if (ret) + return ret; + if (pdev->dev.of_node) { - pdata = mdio_gpio_of_get_data(pdev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); if (bus_id < 0) { dev_warn(&pdev->dev, "failed to get alias id\n"); bus_id = 0; } } else { - pdata = dev_get_platdata(&pdev->dev); bus_id = pdev->id; } - if (!pdata) - return -ENODEV; - - new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id); + new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, bus_id); if (!new_bus) return -ENODEV; - if (pdev->dev.of_node) - ret = of_mdiobus_register(new_bus, pdev->dev.of_node); - else - ret = mdiobus_register(new_bus); - + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); if (ret) mdio_gpio_bus_deinit(&pdev->dev); diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/phy/mdio-mscc-miim.c new file mode 100644 index 000000000000..badbc99bedd3 --- /dev/null +++ b/drivers/net/phy/mdio-mscc-miim.c @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Driver for the MDIO interface of Microsemi network switches. 
+ * + * Author: Alexandre Belloni <alexandre.belloni@bootlin.com> + * Copyright (c) 2017 Microsemi Corporation + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/of_mdio.h> + +#define MSCC_MIIM_REG_STATUS 0x0 +#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3) +#define MSCC_MIIM_REG_CMD 0x8 +#define MSCC_MIIM_CMD_OPR_WRITE BIT(1) +#define MSCC_MIIM_CMD_OPR_READ BIT(2) +#define MSCC_MIIM_CMD_WRDATA_SHIFT 4 +#define MSCC_MIIM_CMD_REGAD_SHIFT 20 +#define MSCC_MIIM_CMD_PHYAD_SHIFT 25 +#define MSCC_MIIM_CMD_VLD BIT(31) +#define MSCC_MIIM_REG_DATA 0xC +#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) + +#define MSCC_PHY_REG_PHY_CFG 0x0 +#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) +#define PHY_CFG_PHY_COMMON_RESET BIT(4) +#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8)) +#define MSCC_PHY_REG_PHY_STATUS 0x4 + +struct mscc_miim_dev { + void __iomem *regs; + void __iomem *phy_regs; +}; + +static int mscc_miim_wait_ready(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + u32 val; + + readl_poll_timeout(miim->regs + MSCC_MIIM_REG_STATUS, val, + !(val & MSCC_MIIM_STATUS_STAT_BUSY), 100, 250000); + if (val & MSCC_MIIM_STATUS_STAT_BUSY) + return -ETIMEDOUT; + + return 0; +} + +static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct mscc_miim_dev *miim = bus->priv; + u32 val; + int ret; + + ret = mscc_miim_wait_ready(bus); + if (ret) + goto out; + + writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ, + miim->regs + MSCC_MIIM_REG_CMD); + + ret = mscc_miim_wait_ready(bus); + if (ret) + goto out; + + val = readl(miim->regs + MSCC_MIIM_REG_DATA); + if (val & MSCC_MIIM_DATA_ERROR) { + ret = -EIO; + goto out; + } + + ret = val & 0xFFFF; +out: + return ret; +} + +static int mscc_miim_write(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + struct mscc_miim_dev *miim = bus->priv; + int ret; + + ret = mscc_miim_wait_ready(bus); + if (ret < 0) + goto out; + + writel(MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | + (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | + MSCC_MIIM_CMD_OPR_WRITE, + miim->regs + MSCC_MIIM_REG_CMD); + +out: + return ret; +} + +static int mscc_miim_reset(struct mii_bus *bus) +{ + struct mscc_miim_dev *miim = bus->priv; + + if (miim->phy_regs) { + writel(0, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); + writel(0x1ff, miim->phy_regs + MSCC_PHY_REG_PHY_CFG); + mdelay(500); + } + + return 0; +} + +static int mscc_miim_probe(struct platform_device *pdev) +{ + struct resource *res; + struct mii_bus *bus; + struct mscc_miim_dev *dev; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev)); + if (!bus) + return -ENOMEM; + + bus->name = "mscc_miim"; + bus->read = mscc_miim_read; + bus->write = mscc_miim_write; + bus->reset = mscc_miim_reset; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev)); + bus->parent = &pdev->dev; + + dev = bus->priv; + dev->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->regs)) { + dev_err(&pdev->dev, "Unable to map MIIM registers\n"); + return PTR_ERR(dev->regs); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + dev->phy_regs = 
devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->phy_regs)) { + dev_err(&pdev->dev, "Unable to map internal phy registers\n"); + return PTR_ERR(dev->phy_regs); + } + } + + ret = of_mdiobus_register(bus, pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret); + return ret; + } + + platform_set_drvdata(pdev, bus); + + return 0; +} + +static int mscc_miim_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + + mdiobus_unregister(bus); + + return 0; +} + +static const struct of_device_id mscc_miim_match[] = { + { .compatible = "mscc,ocelot-miim" }, + { } +}; +MODULE_DEVICE_TABLE(of, mscc_miim_match); + +static struct platform_driver mscc_miim_driver = { + .probe = mscc_miim_probe, + .remove = mscc_miim_remove, + .driver = { + .name = "mscc-miim", + .of_match_table = mscc_miim_match, + }, +}; + +module_platform_driver(mscc_miim_driver); + +MODULE_DESCRIPTION("Microsemi MIIM driver"); +MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 24b5511222c8..98f4b1f706df 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -717,58 +717,10 @@ static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -#ifdef CONFIG_PM -static int mdio_bus_suspend(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->suspend) - return mdio->pm_ops->suspend(dev); - - return 0; -} - -static int mdio_bus_resume(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->resume) - return mdio->pm_ops->resume(dev); - - return 0; -} - -static int mdio_bus_restore(struct device *dev) -{ - struct mdio_device *mdio = to_mdio_device(dev); - - if (mdio->pm_ops && mdio->pm_ops->restore) - return mdio->pm_ops->restore(dev); - - return 0; -} - -static const struct dev_pm_ops mdio_bus_pm_ops = { - .suspend = mdio_bus_suspend, - .resume = mdio_bus_resume, - .freeze = mdio_bus_suspend, - .thaw = mdio_bus_resume, - .restore = mdio_bus_restore, -}; - -#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops) - -#else - -#define MDIO_BUS_PM_OPS NULL - -#endif /* CONFIG_PM */ - struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .uevent = mdio_uevent, - .pm = MDIO_BUS_PM_OPS, }; EXPORT_SYMBOL(mdio_bus_type); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ab195f0916d6..3db06b40580d 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -681,9 +681,6 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 kszphy_get_stat(struct phy_device *phydev, int i) { struct kszphy_hw_stat stat = kszphy_hw_stats[i]; @@ -693,7 +690,7 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i) val = phy_read(phydev, stat.reg); if (val < 0) { - ret = UINT64_MAX; + ret = U64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index a97ac8c12c4c..2d67937866a3 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -21,6 +21,8 @@ #include <linux/phy.h> #include <linux/microchipphy.h> #include <linux/delay.h> +#include <linux/of.h> +#include <dt-bindings/net/microchip-lan78xx.h> #define DRIVER_AUTHOR "WOOJUNG HUH 
<woojung.huh@microchip.com>" #define DRIVER_DESC "Microchip LAN88XX PHY driver" @@ -225,6 +227,8 @@ static int lan88xx_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct lan88xx_priv *priv; + u32 led_modes[4]; + int len; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -232,6 +236,27 @@ static int lan88xx_probe(struct phy_device *phydev) priv->wolopts = 0; + len = of_property_read_variable_u32_array(dev->of_node, + "microchip,led-modes", + led_modes, + 0, + ARRAY_SIZE(led_modes)); + if (len >= 0) { + u32 reg = 0; + int i; + + for (i = 0; i < len; i++) { + if (led_modes[i] > 15) + return -EINVAL; + reg |= led_modes[i] << (i * 4); + } + for (; i < ARRAY_SIZE(led_modes); i++) + reg |= LAN78XX_FORCE_LED_OFF << (i * 4); + (void)phy_write(phydev, LAN78XX_PHY_LED_MODE_SELECT, reg); + } else if (len == -EOVERFLOW) { + return -EINVAL; + } + /* these values can be used to identify internal PHY */ priv->chip_id = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_ID); priv->chip_rev = phy_read_mmd(phydev, 3, LAN88XX_MMD3_CHIP_REV); diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c new file mode 100644 index 000000000000..b1917dd1978a --- /dev/null +++ b/drivers/net/phy/microchip_t1.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 Microchip Technology + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mii.h> +#include <linux/phy.h> + +/* Interrupt Source Register */ +#define LAN87XX_INTERRUPT_SOURCE (0x18) + +/* Interrupt Mask Register */ +#define LAN87XX_INTERRUPT_MASK (0x19) +#define LAN87XX_MASK_LINK_UP (0x0004) +#define LAN87XX_MASK_LINK_DOWN (0x0002) + +#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>" +#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver" + +static int lan87xx_phy_config_intr(struct phy_device *phydev) +{ + int rc, val = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + /* unmask all source and clear them before enable */ + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF); + rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN; + } + + rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val); + + return rc < 0 ? rc : 0; +} + +static int lan87xx_phy_ack_interrupt(struct phy_device *phydev) +{ + int rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE); + + return rc < 0 ? 
rc : 0; +} + +static struct phy_driver microchip_t1_phy_driver[] = { + { + .phy_id = 0x0007c150, + .phy_id_mask = 0xfffffff0, + .name = "Microchip LAN87xx T1", + + .features = SUPPORTED_100baseT_Full, + .flags = PHY_HAS_INTERRUPT, + + .config_init = genphy_config_init, + .config_aneg = genphy_config_aneg, + + .ack_interrupt = lan87xx_phy_ack_interrupt, + .config_intr = lan87xx_phy_config_intr, + + .suspend = genphy_suspend, + .resume = genphy_resume, + } +}; + +module_phy_driver(microchip_t1_phy_driver); + +static struct mdio_device_id __maybe_unused microchip_t1_tbl[] = { + { 0x0007c150, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, microchip_t1_tbl); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 05c1e8ef15e6..537297d2b4b4 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -894,7 +894,7 @@ void phy_state_machine(struct work_struct *work) needs_aneg = true; break; case PHY_NOLINK: - if (phy_interrupt_is_valid(phydev)) + if (phydev->irq != PHY_POLL) break; err = phy_read_status(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 9e4ba8e80a18..bd0f339f69fd 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -346,6 +346,55 @@ static int phy_bus_match(struct device *dev, struct device_driver *drv) } } +static ssize_t +phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id); +} +static DEVICE_ATTR_RO(phy_id); + +static ssize_t +phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + const char *mode = NULL; + + if (phy_is_internal(phydev)) + mode = "internal"; + else + mode = phy_modes(phydev->interface); + + return sprintf(buf, "%s\n", mode); +} +static DEVICE_ATTR_RO(phy_interface); + +static ssize_t +phy_has_fixups_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct phy_device *phydev = to_phy_device(dev); + + return sprintf(buf, "%d\n", phydev->has_fixups); +} +static DEVICE_ATTR_RO(phy_has_fixups); + +static struct attribute *phy_dev_attrs[] = { + &dev_attr_phy_id.attr, + &dev_attr_phy_interface.attr, + &dev_attr_phy_has_fixups.attr, + NULL, +}; +ATTRIBUTE_GROUPS(phy_dev); + +static const struct device_type mdio_bus_phy_type = { + .name = "PHY", + .groups = phy_dev_groups, + .release = phy_device_release, + .pm = MDIO_BUS_PHY_PM_OPS, +}; + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) @@ -359,11 +408,10 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, return ERR_PTR(-ENOMEM); mdiodev = &dev->mdio; - mdiodev->dev.release = phy_device_release; mdiodev->dev.parent = &bus->dev; mdiodev->dev.bus = &mdio_bus_type; + mdiodev->dev.type = &mdio_bus_phy_type; mdiodev->bus = bus; - mdiodev->pm_ops = MDIO_BUS_PHY_PM_OPS; mdiodev->bus_match = phy_bus_match; mdiodev->addr = addr; mdiodev->flags = MDIO_DEVICE_FLAG_PHY; @@ -587,48 +635,6 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) } EXPORT_SYMBOL(get_phy_device); -static ssize_t -phy_id_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "0x%.8lx\n", (unsigned 
long)phydev->phy_id); -} -static DEVICE_ATTR_RO(phy_id); - -static ssize_t -phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - const char *mode = NULL; - - if (phy_is_internal(phydev)) - mode = "internal"; - else - mode = phy_modes(phydev->interface); - - return sprintf(buf, "%s\n", mode); -} -static DEVICE_ATTR_RO(phy_interface); - -static ssize_t -phy_has_fixups_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct phy_device *phydev = to_phy_device(dev); - - return sprintf(buf, "%d\n", phydev->has_fixups); -} -static DEVICE_ATTR_RO(phy_has_fixups); - -static struct attribute *phy_dev_attrs[] = { - &dev_attr_phy_id.attr, - &dev_attr_phy_interface.attr, - &dev_attr_phy_has_fixups.attr, - NULL, -}; -ATTRIBUTE_GROUPS(phy_dev); - /** * phy_device_register - Register the phy device on the MDIO bus * @phydev: phy_device structure to be added to the MDIO bus @@ -651,8 +657,6 @@ int phy_device_register(struct phy_device *phydev) goto out; } - phydev->mdio.dev.groups = phy_dev_groups; - err = device_add(&phydev->mdio.dev); if (err) { pr_err("PHY %d failed to add\n", phydev->mdio.addr); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index c582b2d7546c..af4dc4425be2 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -19,6 +19,7 @@ #include <linux/phylink.h> #include <linux/rtnetlink.h> #include <linux/spinlock.h> +#include <linux/timer.h> #include <linux/workqueue.h> #include "sfp.h" @@ -54,6 +55,7 @@ struct phylink { /* The link configuration settings */ struct phylink_link_state link_config; struct gpio_desc *link_gpio; + struct timer_list link_poll; void (*get_fixed_state)(struct net_device *dev, struct phylink_link_state *s); @@ -360,7 +362,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat if (pl->get_fixed_state) pl->get_fixed_state(pl->netdev, state); else if (pl->link_gpio) - state->link = !!gpiod_get_value(pl->link_gpio); + state->link = !!gpiod_get_value_cansleep(pl->link_gpio); } /* Flow control is resolved according to our and the link partners @@ -500,6 +502,15 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static void phylink_fixed_poll(struct timer_list *t) +{ + struct phylink *pl = container_of(t, struct phylink, link_poll); + + mod_timer(t, jiffies + HZ); + + phylink_run_resolve(pl); +} + static const struct sfp_upstream_ops sfp_phylink_ops; static int phylink_register_sfp(struct phylink *pl, @@ -572,6 +583,7 @@ struct phylink *phylink_create(struct net_device *ndev, pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + timer_setup(&pl->link_poll, phylink_fixed_poll, 0); bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); linkmode_copy(pl->link_config.advertising, pl->supported); @@ -612,6 +624,8 @@ void phylink_destroy(struct phylink *pl) { if (pl->sfp_bus) sfp_unregister_upstream(pl->sfp_bus); + if (!IS_ERR_OR_NULL(pl->link_gpio)) + gpiod_put(pl->link_gpio); cancel_work_sync(&pl->resolve); kfree(pl); @@ -903,6 +917,8 @@ void phylink_start(struct phylink *pl) clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); phylink_run_resolve(pl); + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) + mod_timer(&pl->link_poll, jiffies + HZ); if (pl->sfp_bus) sfp_upstream_start(pl->sfp_bus); if (pl->phydev) @@ -927,6 +943,8 @@ void phylink_stop(struct 
phylink *pl) phy_stop(pl->phydev); if (pl->sfp_bus) sfp_upstream_stop(pl->sfp_bus); + if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) + del_timer_sync(&pl->link_poll); set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); queue_work(system_power_efficient_wq, &pl->resolve); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 9f48ecf9c627..082fb40c656d 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -145,6 +145,20 @@ static int rtl8211f_config_init(struct phy_device *phydev) return phy_modify_paged(phydev, 0xd08, 0x11, RTL8211F_TX_DELAY, val); } +static int rtl8211b_suspend(struct phy_device *phydev) +{ + phy_write(phydev, MII_MMD_DATA, BIT(9)); + + return genphy_suspend(phydev); +} + +static int rtl8211b_resume(struct phy_device *phydev) +{ + phy_write(phydev, MII_MMD_DATA, 0); + + return genphy_resume(phydev); +} + static struct phy_driver realtek_drvs[] = { { .phy_id = 0x00008201, @@ -174,6 +188,8 @@ static struct phy_driver realtek_drvs[] = { .config_intr = &rtl8211b_config_intr, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, + .suspend = rtl8211b_suspend, + .resume = rtl8211b_resume, }, { .phy_id = 0x001cc914, .name = "RTL8211DN Gigabit Ethernet", diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index fd6c23f69c2f..d437f4f5ed52 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -132,6 +132,13 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, br_max = br_nom + br_nom * id->ext.br_min / 100; br_min = br_nom - br_nom * id->ext.br_min / 100; } + + /* When using passive cables, in case neither BR,min nor BR,max + * are specified, set br_min to 0 as the nominal value is then + * used as the maximum. + */ + if (br_min == br_max && id->base.sfp_ct_passive) + br_min = 0; } /* Set ethtool support from the compliance fields. */ diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 4ab6e9a50bbe..c4c92db86dfa 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -976,6 +976,7 @@ static int sfp_probe(struct platform_device *pdev) if (pdev->dev.of_node) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *id; + struct i2c_adapter *i2c; struct device_node *np; id = of_match_node(sfp_of_match, node); @@ -985,19 +986,20 @@ static int sfp_probe(struct platform_device *pdev) sff = sfp->type = id->data; np = of_parse_phandle(node, "i2c-bus", 0); - if (np) { - struct i2c_adapter *i2c; - - i2c = of_find_i2c_adapter_by_node(np); - of_node_put(np); - if (!i2c) - return -EPROBE_DEFER; - - err = sfp_i2c_configure(sfp, i2c); - if (err < 0) { - i2c_put_adapter(i2c); - return err; - } + if (!np) { + dev_err(sfp->dev, "missing 'i2c-bus' property\n"); + return -ENODEV; + } + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + if (!i2c) + return -EPROBE_DEFER; + + err = sfp_i2c_configure(sfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; } } @@ -1065,6 +1067,15 @@ static int sfp_probe(struct platform_device *pdev) if (poll) mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + /* We could have an issue in cases no Tx disable pin is available or + * wired as modules using a laser as their light source will continue to + * be active when the fiber is removed. This could be a safety issue and + * we should at least warn the user about that. 
+ */ + if (!sfp->gpio[GPIO_TX_DISABLE]) + dev_warn(sfp->dev, + "No tx_disable pin: SFP modules will always be emitting.\n"); + return 0; } diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index be399d645224..c328208388da 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -168,9 +168,6 @@ static void smsc_get_strings(struct phy_device *phydev, u8 *data) } } -#ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) -#endif static u64 smsc_get_stat(struct phy_device *phydev, int i) { struct smsc_hw_stat stat = smsc_hw_stats[i]; @@ -179,7 +176,7 @@ static u64 smsc_get_stat(struct phy_device *phydev, int i) val = phy_read(phydev, stat.reg); if (val < 0) - ret = UINT64_MAX; + ret = U64_MAX; else ret = val; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index ddb6bf85a59c..8863fa023500 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team, static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; + netdev_features_t vlan_features = TEAM_VLAN_FEATURES & + NETIF_F_ALL_FOR_ALL; netdev_features_t enc_features = TEAM_ENC_FEATURES; unsigned short max_hard_header_len = ETH_HLEN; unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | @@ -1026,7 +1027,8 @@ static void __team_compute_features(struct team *team) } team->dev->vlan_features = vlan_features; - team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL; + team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | + NETIF_F_GSO_UDP_L4; team->dev->hard_header_len = max_hard_header_len; team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@ -1128,6 +1130,7 @@ static int team_upper_dev_link(struct team *team, struct team_port *port, int err; lag_upper_info.tx_type = team->mode->lag_tx_type; + lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN; err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, &lag_upper_info, extack); if (err) @@ -2117,7 +2120,7 @@ static void team_setup(struct net_device *dev) NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; - dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; + dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; dev->features |= dev->hw_features; } @@ -2424,7 +2427,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; errout: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } @@ -2718,7 +2720,6 @@ send_done: nla_put_failure: err = -EMSGSIZE; errout: - genlmsg_cancel(skb, hdr); nlmsg_free(skb); return err; } @@ -2942,7 +2943,7 @@ static int team_device_event(struct notifier_block *unused, case NETDEV_CHANGE: if (netif_running(port->dev)) team_port_change_check(port, - !!netif_carrier_ok(port->dev)); + !!netif_oper_up(port->dev)); break; case NETDEV_UNREGISTER: team_del_slave(port->team->dev, dev); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 23e9eb66197f..85e14adf5207 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -70,6 +70,7 @@ #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> +#include <net/xdp.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> @@ -80,6 +81,9 @@ #include <linux/uaccess.h> #include <linux/proc_fs.h> +static void tun_default_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd); + /* Uncomment to enable debugging */ /* #define TUN_DEBUG 1 */ @@ -241,6 +245,7 @@ struct tun_struct { struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu 
*steering_prog; struct tun_prog __rcu *filter_prog; + struct ethtool_link_ksettings link_ksettings; }; struct veth { @@ -248,11 +253,11 @@ struct veth { __be16 h_vlan_TCI; }; -bool tun_is_xdp_buff(void *ptr) +bool tun_is_xdp_frame(void *ptr) { return (unsigned long)ptr & TUN_XDP_FLAG; } -EXPORT_SYMBOL(tun_is_xdp_buff); +EXPORT_SYMBOL(tun_is_xdp_frame); void *tun_xdp_to_ptr(void *ptr) { @@ -525,11 +530,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, rcu_read_lock(); - /* We may get a very small possibility of OOO during switching, not - * worth to optimize.*/ - if (tun->numqueues == 1 || tfile->detached) - goto unlock; - e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? */ @@ -548,7 +548,6 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, spin_unlock_bh(&tun->lock); } -unlock: rcu_read_unlock(); } @@ -660,10 +659,10 @@ void tun_ptr_free(void *ptr) { if (!ptr) return; - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - put_page(virt_to_head_page(xdp->data)); + xdp_return_frame(xdpf); } else { __skb_array_destroy_skb(ptr); } @@ -848,6 +847,12 @@ static int tun_attach(struct tun_struct *tun, struct file *file, tun->dev, tfile->queue_index); if (err < 0) goto out; + err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + xdp_rxq_info_unreg(&tfile->xdp_rxq); + goto out; + } err = 0; } @@ -1284,65 +1289,69 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; -static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static void __tun_xdp_flush_tfile(struct tun_file *tfile) +{ + /* Notify and wake up reader process */ + if (tfile->flags & TUN_FASYNC) + kill_fasync(&tfile->fasync, SIGIO, POLL_IN); + tfile->socket.sk->sk_data_ready(tfile->socket.sk); +} + +static int tun_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); - struct xdp_buff *buff = xdp->data_hard_start; - int headroom = xdp->data - xdp->data_hard_start; struct tun_file *tfile; u32 numqueues; - int ret = 0; - - /* Assure headroom is available and buff is properly aligned */ - if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) - return -ENOSPC; + int drops = 0; + int cnt = n; + int i; - *buff = *xdp; + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; rcu_read_lock(); numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { - ret = -ENOSPC; - goto out; + rcu_read_unlock(); + return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); - /* Encode the XDP flag into lowest bit for consumer to differ - * XDP buffer from sk_buff. - */ - if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { - this_cpu_inc(tun->pcpu_stats->tx_dropped); - ret = -ENOSPC; + + spin_lock(&tfile->tx_ring.producer_lock); + for (i = 0; i < n; i++) { + struct xdp_frame *xdp = frames[i]; + /* Encode the XDP flag into lowest bit for consumer to differ + * XDP buffer from sk_buff. 
+ */ + void *frame = tun_xdp_to_ptr(xdp); + + if (__ptr_ring_produce(&tfile->tx_ring, frame)) { + this_cpu_inc(tun->pcpu_stats->tx_dropped); + xdp_return_frame_rx_napi(xdp); + drops++; + } } + spin_unlock(&tfile->tx_ring.producer_lock); + + if (flags & XDP_XMIT_FLUSH) + __tun_xdp_flush_tfile(tfile); -out: rcu_read_unlock(); - return ret; + return cnt - drops; } -static void tun_xdp_flush(struct net_device *dev) +static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { - struct tun_struct *tun = netdev_priv(dev); - struct tun_file *tfile; - u32 numqueues; + struct xdp_frame *frame = convert_to_xdp_frame(xdp); - rcu_read_lock(); + if (unlikely(!frame)) + return -EOVERFLOW; - numqueues = READ_ONCE(tun->numqueues); - if (!numqueues) - goto out; - - tfile = rcu_dereference(tun->tfiles[smp_processor_id() % - numqueues]); - /* Notify and wake up reader process */ - if (tfile->flags & TUN_FASYNC) - kill_fasync(&tfile->fasync, SIGIO, POLL_IN); - tfile->socket.sk->sk_data_ready(tfile->socket.sk); - -out: - rcu_read_unlock(); + return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); } static const struct net_device_ops tap_netdev_ops = { @@ -1363,7 +1372,6 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, - .ndo_xdp_flush = tun_xdp_flush, }; static void tun_flow_init(struct tun_struct *tun) @@ -1680,14 +1688,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, case XDP_TX: get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_xmit(tun->dev, &xdp)) + if (tun_xdp_tx(tun->dev, &xdp)) goto err_redirect; - tun_xdp_flush(tun->dev); rcu_read_unlock(); local_bh_enable(); return NULL; case XDP_PASS: delta = orig_data - xdp.data; + len = xdp.data_end - xdp.data; break; default: bpf_warn_invalid_xdp_action(act); @@ -1708,7 +1716,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, } skb_reserve(skb, pad - delta); - skb_put(skb, len + delta); + skb_put(skb, len); get_page(alloc_frag->page); alloc_frag->offset += buflen; @@ -1932,10 +1940,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, local_bh_enable(); } - rcu_read_lock(); - if (!rcu_dereference(tun->steering_prog)) + /* Compute the costly rx hash only if needed for flow updates. + * We may get a very small possibility of OOO during switching, not + * worth to optimize. + */ + if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && + !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); - rcu_read_unlock(); if (frags) { /* Exercise flow dissector code path. 
*/ @@ -2004,11 +2015,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, - struct xdp_buff *xdp, + struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; - size_t size = xdp->data_end - xdp->data; + size_t size = xdp_frame->len; struct tun_pcpu_stats *stats; size_t ret; @@ -2024,7 +2035,7 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun, iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } - ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; + ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); @@ -2192,11 +2203,11 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, return err; } - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - ret = tun_put_user_xdp(tun, tfile, xdp, to); - put_page(virt_to_head_page(xdp->data)); + ret = tun_put_user_xdp(tun, tfile, xdpf, to); + xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; @@ -2278,6 +2289,7 @@ static void tun_setup(struct net_device *dev) tun->owner = INVALID_UID; tun->group = INVALID_GID; + tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; @@ -2435,10 +2447,10 @@ out_free: static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - return xdp->data_end - xdp->data; + return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { @@ -2852,10 +2864,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; + struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; struct ifreq ifr; - struct net *net; kuid_t owner; kgid_t group; int sndbuf; @@ -2879,14 +2891,18 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, */ return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES, (unsigned int __user*)argp); - } else if (cmd == TUNSETQUEUE) + } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); + } else if (cmd == SIOCGSKNS) { + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + return open_related_ns(&net->ns, get_net_ns); + } ret = 0; rtnl_lock(); tun = tun_get(tfile); - net = sock_net(&tfile->sk); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) @@ -2916,14 +2932,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tfile->ifindex = ifindex; goto unlock; } - if (cmd == SIOCGSKNS) { - ret = -EPERM; - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - goto unlock; - - ret = open_related_ns(&net->ns, get_net_ns); - goto unlock; - } ret = -EBADFD; if (!tun) @@ -3313,8 +3321,8 @@ static struct miscdevice tun_miscdev = { /* ethtool interface */ -static int tun_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) +static void tun_default_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); @@ -3323,6 +3331,23 @@ static int tun_get_link_ksettings(struct net_device *dev, cmd->base.port = PORT_TP; cmd->base.phy_address = 
0; cmd->base.autoneg = AUTONEG_DISABLE; +} + +static int tun_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct tun_struct *tun = netdev_priv(dev); + + memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); + return 0; +} + +static int tun_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct tun_struct *tun = netdev_priv(dev); + + memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } @@ -3393,6 +3418,7 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, + .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index f28bd74ac275..418b0904cecb 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -111,6 +111,7 @@ config USB_LAN78XX select MII select PHYLIB select MICROCHIP_PHY + select FIXED_PHY help This option adds support for Microchip LAN78XX based USB 2 & USB 3 10/100/1000 Ethernet adapters. diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 0867f7275852..8dff87ec6d99 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,13 +36,14 @@ #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/microchipphy.h> -#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> #include "lan78xx.h" #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.6" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -278,6 +279,30 @@ struct lan78xx_statstage64 { u64 eee_tx_lpi_time; }; +static u32 lan78xx_regs[] = { + ID_REV, + INT_STS, + HW_CFG, + PMT_CTL, + E2P_CMD, + E2P_DATA, + USB_STATUS, + VLAN_TYPE, + MAC_CR, + MAC_RX, + MAC_TX, + FLOW, + ERR_STS, + MII_ACC, + MII_DATA, + EEE_TX_LPI_REQ_DLY, + EEE_TW_TX_SYS, + EEE_TX_LPI_REM_DLY, + WUCSR +}; + +#define PHY_REG_SIZE (32 * sizeof(u32)) + struct lan78xx_net; struct lan78xx_priv { @@ -1477,7 +1502,6 @@ static void lan78xx_get_drvinfo(struct net_device *net, struct lan78xx_net *dev = netdev_priv(net); strncpy(info->driver, DRIVER_NAME, sizeof(info->driver)); - strncpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } @@ -1605,6 +1629,34 @@ exit: return ret; } +static int lan78xx_get_regs_len(struct net_device *netdev) +{ + if (!netdev->phydev) + return (sizeof(lan78xx_regs)); + else + return (sizeof(lan78xx_regs) + PHY_REG_SIZE); +} + +static void +lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *buf) +{ + u32 *data = buf; + int i, j; + struct lan78xx_net *dev = netdev_priv(netdev); + + /* Read Device/MAC registers */ + for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++) + lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]); + + if (!netdev->phydev) + return; + + /* Read PHY registers */ + for (j = 0; j < 32; i++, j++) + data[i] = phy_read(netdev->phydev, j); +} + static const struct ethtool_ops lan78xx_ethtool_ops = { .get_link = lan78xx_get_link, .nway_reset = phy_ethtool_nway_reset, @@ -1625,6 +1677,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { .set_pauseparam = lan78xx_set_pause, .get_link_ksettings = lan78xx_get_link_ksettings, 
.set_link_ksettings = lan78xx_set_link_ksettings, + .get_regs_len = lan78xx_get_regs_len, + .get_regs = lan78xx_get_regs, }; static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -1652,34 +1706,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) addr[5] = (addr_hi >> 8) & 0xFF; if (!is_valid_ether_addr(addr)) { - /* reading mac address from EEPROM or OTP */ - if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - addr) == 0) || - (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, - addr) == 0)) { - if (is_valid_ether_addr(addr)) { - /* eeprom values are valid so use them */ - netif_dbg(dev, ifup, dev->net, - "MAC address read from EEPROM"); - } else { - /* generate random MAC */ - random_ether_addr(addr); - netif_dbg(dev, ifup, dev->net, - "MAC address set to random addr"); - } - - addr_lo = addr[0] | (addr[1] << 8) | - (addr[2] << 16) | (addr[3] << 24); - addr_hi = addr[4] | (addr[5] << 8); - - ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); - ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) { + /* valid address present in Device Tree */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from Device Tree"); + } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, + ETH_ALEN, addr) == 0) || + (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, + ETH_ALEN, addr) == 0)) && + is_valid_ether_addr(addr)) { + /* eeprom values are valid so use them */ + netif_dbg(dev, ifup, dev->net, + "MAC address read from EEPROM"); } else { /* generate random MAC */ random_ether_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } + + addr_lo = addr[0] | (addr[1] << 8) | + (addr[2] << 16) | (addr[3] << 24); + addr_hi = addr[4] | (addr[5] << 8); + + ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); + ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); } ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); @@ -1762,6 +1813,7 @@ done: static int lan78xx_mdio_init(struct lan78xx_net *dev) { + struct device_node *node; int ret; dev->mdiobus = mdiobus_alloc(); @@ -1790,7 +1842,10 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) break; } - ret = mdiobus_register(dev->mdiobus); + node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); + ret = of_mdiobus_register(dev->mdiobus, node); + if (node) + of_node_put(node); if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; @@ -2003,52 +2058,91 @@ static int ksz9031rnx_fixup(struct phy_device *phydev) return 1; } -static int lan78xx_phy_init(struct lan78xx_net *dev) +static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) { + u32 buf; int ret; - u32 mii_adv; + struct fixed_phy_status fphy_status = { + .link = 1, + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + }; struct phy_device *phydev; phydev = phy_find_first(dev->mdiobus); if (!phydev) { - netdev_err(dev->net, "no PHY found\n"); - return -EIO; - } - - if ((dev->chipid == ID_REV_CHIP_ID_7800_) || - (dev->chipid == ID_REV_CHIP_ID_7850_)) { - phydev->is_internal = true; - dev->interface = PHY_INTERFACE_MODE_GMII; - - } else if (dev->chipid == ID_REV_CHIP_ID_7801_) { + netdev_dbg(dev->net, "PHY Not Found!! 
Registering Fixed PHY\n"); + phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, + NULL); + if (IS_ERR(phydev)) { + netdev_err(dev->net, "No PHY/fixed_PHY found\n"); + return NULL; + } + netdev_dbg(dev->net, "Registered FIXED PHY\n"); + dev->interface = PHY_INTERFACE_MODE_RGMII; + ret = lan78xx_write_reg(dev, MAC_RGMII_ID, + MAC_RGMII_ID_TXC_DELAY_EN_); + ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); + ret = lan78xx_read_reg(dev, HW_CFG, &buf); + buf |= HW_CFG_CLK125_EN_; + buf |= HW_CFG_REFCLK25_EN_; + ret = lan78xx_write_reg(dev, HW_CFG, buf); + } else { if (!phydev->drv) { netdev_err(dev->net, "no PHY driver found\n"); - return -EIO; + return NULL; } - dev->interface = PHY_INTERFACE_MODE_RGMII; - /* external PHY fixup for KSZ9031RNX */ ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, ksz9031rnx_fixup); if (ret < 0) { - netdev_err(dev->net, "fail to register fixup\n"); - return ret; + netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n"); + return NULL; } /* external PHY fixup for LAN8835 */ ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, lan8835_fixup); if (ret < 0) { - netdev_err(dev->net, "fail to register fixup\n"); - return ret; + netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n"); + return NULL; } /* add more external PHY fixup here if needed */ phydev->is_internal = false; - } else { - netdev_err(dev->net, "unknown ID found\n"); - ret = -EIO; - goto error; + } + return phydev; +} + +static int lan78xx_phy_init(struct lan78xx_net *dev) +{ + int ret; + u32 mii_adv; + struct phy_device *phydev; + + switch (dev->chipid) { + case ID_REV_CHIP_ID_7801_: + phydev = lan7801_phy_init(dev); + if (!phydev) { + netdev_err(dev->net, "lan7801: PHY Init Failed"); + return -EIO; + } + break; + + case ID_REV_CHIP_ID_7800_: + case ID_REV_CHIP_ID_7850_: + phydev = phy_find_first(dev->mdiobus); + if (!phydev) { + netdev_err(dev->net, "no PHY found\n"); + return -EIO; + } + phydev->is_internal = true; + dev->interface = PHY_INTERFACE_MODE_GMII; + break; + + default: + netdev_err(dev->net, "Unknown CHIP ID found\n"); + return -EIO; } /* if phyirq is not set, use polling mode in phylib */ @@ -2067,6 +2161,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (ret) { netdev_err(dev->net, "can't attach PHY to %s\n", dev->mdiobus->id); + if (dev->chipid == ID_REV_CHIP_ID_7801_) { + if (phy_is_pseudo_fixed_link(phydev)) { + fixed_phy_unregister(phydev); + } else { + phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, + 0xfffffff0); + phy_unregister_fixup_for_uid(PHY_LAN8835, + 0xfffffff0); + } + } return -EIO; } @@ -2079,17 +2183,33 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + if (phydev->mdio.dev.of_node) { + u32 reg; + int len; + + len = of_property_count_elems_of_size(phydev->mdio.dev.of_node, + "microchip,led-modes", + sizeof(u32)); + if (len >= 0) { + /* Ensure the appropriate LEDs are enabled */ + lan78xx_read_reg(dev, HW_CFG, ®); + reg &= ~(HW_CFG_LED0_EN_ | + HW_CFG_LED1_EN_ | + HW_CFG_LED2_EN_ | + HW_CFG_LED3_EN_); + reg |= (len > 0) * HW_CFG_LED0_EN_ | + (len > 1) * HW_CFG_LED1_EN_ | + (len > 2) * HW_CFG_LED2_EN_ | + (len > 3) * HW_CFG_LED3_EN_; + lan78xx_write_reg(dev, HW_CFG, reg); + } + } + genphy_config_aneg(phydev); dev->fc_autoneg = phydev->autoneg; return 0; - -error: - phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); - phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); - - 
return ret; } static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) @@ -3487,6 +3607,7 @@ static void lan78xx_disconnect(struct usb_interface *intf) struct lan78xx_net *dev; struct usb_device *udev; struct net_device *net; + struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -3495,12 +3616,16 @@ static void lan78xx_disconnect(struct usb_interface *intf) udev = interface_to_usbdev(intf); net = dev->net; + phydev = net->phydev; phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); phy_disconnect(net->phydev); + if (phy_is_pseudo_fixed_link(phydev)) + fixed_phy_unregister(phydev); + unregister_netdev(net); cancel_delayed_work_sync(&dev->wq); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 094680871687..8e8b51f171f4 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1249,7 +1249,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ - {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 032e1ac10a30..2aaa18ec7d46 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -30,8 +30,11 @@ #include <linux/cpu.h> #include <linux/average.h> #include <linux/filter.h> +#include <linux/netdevice.h> +#include <linux/pci.h> #include <net/route.h> #include <net/xdp.h> +#include <net/net_failover.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -210,6 +213,9 @@ struct virtnet_info { u32 speed; unsigned long guest_offloads; + + /* failover when STANDBY feature enabled */ + struct failover *failover; }; struct padded_vnet_hdr { @@ -407,58 +413,73 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, return skb; } -static void virtnet_xdp_flush(struct net_device *dev) +static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, + struct send_queue *sq, + struct xdp_frame *xdpf) { - struct virtnet_info *vi = netdev_priv(dev); - struct send_queue *sq; - unsigned int qp; + struct virtio_net_hdr_mrg_rxbuf *hdr; + int err; - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); - sq = &vi->sq[qp]; + /* virtqueue want to use data area in-front of packet */ + if (unlikely(xdpf->metasize > 0)) + return -EOPNOTSUPP; - virtqueue_kick(sq->vq); + if (unlikely(xdpf->headroom < vi->hdr_len)) + return -EOVERFLOW; + + /* Make room for virtqueue hdr (also change xdpf->headroom?) 
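+ * The vnet header is prepended in place: data is moved back by
+ * vi->hdr_len and len grows by the same amount just below.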
*/ + xdpf->data -= vi->hdr_len; + /* Zero header and leave csum up to XDP layers */ + hdr = xdpf->data; + memset(hdr, 0, vi->hdr_len); + xdpf->len += vi->hdr_len; + + sg_init_one(sq->sg, xdpf->data, xdpf->len); + + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); + if (unlikely(err)) + return -ENOSPC; /* Caller handle free/refcnt */ + + return 0; } -static bool __virtnet_xdp_xmit(struct virtnet_info *vi, - struct xdp_buff *xdp) +static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi, + struct xdp_frame *xdpf) { - struct virtio_net_hdr_mrg_rxbuf *hdr; - unsigned int len; + struct xdp_frame *xdpf_sent; struct send_queue *sq; + unsigned int len; unsigned int qp; - void *xdp_sent; - int err; qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); sq = &vi->sq[qp]; /* Free up any pending old buffers before queueing new ones. */ - while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { - struct page *sent_page = virt_to_head_page(xdp_sent); - - put_page(sent_page); - } - - xdp->data -= vi->hdr_len; - /* Zero header and leave csum up to XDP layers */ - hdr = xdp->data; - memset(hdr, 0, vi->hdr_len); - - sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data); - - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC); - if (unlikely(err)) - return false; /* Caller handle free/refcnt */ + while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) + xdp_return_frame(xdpf_sent); - return true; + return __virtnet_xdp_xmit_one(vi, sq, xdpf); } -static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +static int virtnet_xdp_xmit(struct net_device *dev, + int n, struct xdp_frame **frames, u32 flags) { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; + struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; - bool sent; + struct send_queue *sq; + unsigned int len; + unsigned int qp; + int drops = 0; + int err; + int i; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id(); + sq = &vi->sq[qp]; /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. @@ -467,10 +488,24 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (!xdp_prog) return -ENXIO; - sent = __virtnet_xdp_xmit(vi, xdp); - if (!sent) - return -ENOSPC; - return 0; + /* Free up any pending old buffers before queueing new ones. 
*/ + while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) + xdp_return_frame(xdpf_sent); + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + err = __virtnet_xdp_xmit_one(vi, sq, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) + virtqueue_kick(sq->vq); + + return n - drops; } static unsigned int virtnet_get_headroom(struct virtnet_info *vi) @@ -559,7 +594,6 @@ static struct sk_buff *receive_small(struct net_device *dev, struct page *page = virt_to_head_page(buf); unsigned int delta = 0; struct page *xdp_page; - bool sent; int err; len -= vi->hdr_len; @@ -568,6 +602,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset; + struct xdp_frame *xdpf; struct xdp_buff xdp; void *orig_data; u32 act; @@ -608,10 +643,14 @@ static struct sk_buff *receive_small(struct net_device *dev, case XDP_PASS: /* Recalculate length in case bpf program changed it */ delta = orig_data - xdp.data; + len = xdp.data_end - xdp.data; break; case XDP_TX: - sent = __virtnet_xdp_xmit(vi, &xdp); - if (unlikely(!sent)) { + xdpf = convert_to_xdp_frame(&xdp); + if (unlikely(!xdpf)) + goto err_xdp; + err = __virtnet_xdp_tx_xmit(vi, xdpf); + if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; } @@ -641,7 +680,7 @@ static struct sk_buff *receive_small(struct net_device *dev, goto err; } skb_reserve(skb, headroom - delta); - skb_put(skb, len + delta); + skb_put(skb, len); if (!delta) { buf += header_offset; memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); @@ -694,7 +733,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct bpf_prog *xdp_prog; unsigned int truesize; unsigned int headroom = mergeable_ctx_to_headroom(ctx); - bool sent; int err; head_skb = NULL; @@ -702,6 +740,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, rcu_read_lock(); xdp_prog = rcu_dereference(rq->xdp_prog); if (xdp_prog) { + struct xdp_frame *xdpf; struct page *xdp_page; struct xdp_buff xdp; void *data; @@ -755,6 +794,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, offset = xdp.data - page_address(xdp_page) - vi->hdr_len; + /* recalculate len if xdp.data or xdp.data_end were + * adjusted + */ + len = xdp.data_end - xdp.data + vi->hdr_len; /* We can only create skb based on xdp_page. 
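 * If the buffers were linearized into a freshly allocated xdp_page,
 * the original page is released and the skb must wrap the copy.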
*/ if (unlikely(xdp_page != page)) { rcu_read_unlock(); @@ -765,8 +808,11 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } break; case XDP_TX: - sent = __virtnet_xdp_xmit(vi, &xdp); - if (unlikely(!sent)) { + xdpf = convert_to_xdp_frame(&xdp); + if (unlikely(!xdpf)) + goto err_xdp; + err = __virtnet_xdp_tx_xmit(vi, xdpf); + if (unlikely(err)) { trace_xdp_exception(vi->dev, xdp_prog, act); if (unlikely(xdp_page != page)) put_page(xdp_page); @@ -1311,6 +1357,13 @@ static int virtnet_open(struct net_device *dev) if (err < 0) return err; + err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, + MEM_TYPE_PAGE_SHARED, NULL); + if (err < 0) { + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); + return err; + } + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } @@ -1502,6 +1555,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p) struct sockaddr *addr; struct scatterlist sg; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) + return -EOPNOTSUPP; + addr = kmemdup(p, sizeof(*addr), GFP_KERNEL); if (!addr) return -ENOMEM; @@ -2285,6 +2341,22 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct virtnet_info *vi = netdev_priv(dev); + int ret; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) + return -EOPNOTSUPP; + + ret = snprintf(buf, len, "sby"); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -2300,8 +2372,8 @@ static const struct net_device_ops virtnet_netdev = { #endif .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, - .ndo_xdp_flush = virtnet_xdp_flush, .ndo_features_check = passthru_features_check, + .ndo_get_phys_port_name = virtnet_get_phys_port_name, }; static void virtnet_config_changed_work(struct work_struct *work) @@ -2531,12 +2603,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi) vi->sq[i].vq = vqs[txq2vq(i)]; } - kfree(names); - kfree(callbacks); - kfree(vqs); - kfree(ctx); + /* run here: ret == 0. 
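+ * Success deliberately falls through to the err_find cleanup,
+ * which frees the temporary vqs/callbacks/names/ctx arrays on
+ * both paths and returns ret (0 here).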
*/ - return 0; err_find: kfree(ctx); @@ -2855,10 +2923,18 @@ static int virtnet_probe(struct virtio_device *vdev) virtnet_init_settings(dev); + if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) { + vi->failover = net_failover_create(vi->dev); + if (IS_ERR(vi->failover)) { + err = PTR_ERR(vi->failover); + goto free_vqs; + } + } + err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); - goto free_vqs; + goto free_failover; } virtio_device_ready(vdev); @@ -2895,6 +2971,8 @@ free_unregister_netdev: vi->vdev->config->reset(vdev); unregister_netdev(dev); +free_failover: + net_failover_destroy(vi->failover); free_vqs: cancel_delayed_work_sync(&vi->refill); free_receive_page_frags(vi); @@ -2929,6 +3007,8 @@ static void virtnet_remove(struct virtio_device *vdev) unregister_netdev(vi->dev); + net_failover_destroy(vi->failover); + remove_vq_common(vi); free_netdev(vi->dev); @@ -2978,7 +3058,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ - VIRTIO_NET_F_SPEED_DUPLEX + VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 27a9bb8c9611..e454dfc9ad8f 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2949,7 +2949,7 @@ vmxnet3_close(struct net_device *netdev) * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); vmxnet3_quiesce_dev(adapter); @@ -2999,7 +2999,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); @@ -3589,7 +3589,7 @@ static void vmxnet3_shutdown_device(struct pci_dev *pdev) * completion. */ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) { diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 2ff27314e047..559db051a500 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -600,7 +600,7 @@ vmxnet3_set_ringparam(struct net_device *netdev, * completion. 
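 * Note: usleep_range() is preferred over msleep() for waits in the
 * 1-20 ms range, where msleep() can sleep far longer than requested.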
*/ while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); if (netif_running(netdev)) { vmxnet3_quiesce_dev(adapter); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 0a2b180d138a..f93547f257fb 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -48,6 +48,9 @@ static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; struct rt6_info __rcu *rt6; +#if IS_ENABLED(CONFIG_IPV6) + struct fib6_table *fib6_table; +#endif u32 tb_id; }; @@ -496,7 +499,6 @@ static int vrf_rt6_create(struct net_device *dev) int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM; struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); - struct fib6_table *rt6i_table; struct rt6_info *rt6; int rc = -ENOMEM; @@ -504,8 +506,8 @@ static int vrf_rt6_create(struct net_device *dev) if (!ipv6_mod_enabled()) return 0; - rt6i_table = fib6_new_table(net, vrf->tb_id); - if (!rt6i_table) + vrf->fib6_table = fib6_new_table(net, vrf->tb_id); + if (!vrf->fib6_table) goto out; /* create a dst for routing packets out a VRF device */ @@ -513,7 +515,6 @@ static int vrf_rt6_create(struct net_device *dev) if (!rt6) goto out; - rt6->rt6i_table = rt6i_table; rt6->dst.output = vrf_output6; rcu_assign_pointer(vrf->rt6, rt6); @@ -946,22 +947,8 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net, int flags) { struct net_vrf *vrf = netdev_priv(dev); - struct fib6_table *table = NULL; - struct rt6_info *rt6; - - rcu_read_lock(); - - /* fib6_table does not have a refcnt and can not be freed */ - rt6 = rcu_dereference(vrf->rt6); - if (likely(rt6)) - table = rt6->rt6i_table; - - rcu_read_unlock(); - - if (!table) - return NULL; - return ip6_pol_route(net, table, ifindex, fl6, skb, flags); + return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags); } static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, @@ -1267,7 +1254,7 @@ static void vrf_setup(struct net_device *dev) /* enable offload features */ dev->features |= NETIF_F_GSO_SOFTWARE; - dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; dev->hw_features = dev->features; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fab7a4db249e..aee0e60471f1 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2085,9 +2085,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, local_ip = vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; md->gbp = skb->mark; - ttl = vxlan->cfg.ttl; - if (!ttl && vxlan_addr_multicast(dst)) - ttl = 1; + if (flags & VXLAN_F_TTL_INHERIT) { + ttl = ip_tunnel_get_ttl(old_iph, skb); + } else { + ttl = vxlan->cfg.ttl; + if (!ttl && vxlan_addr_multicast(dst)) + ttl = 1; + } tos = vxlan->cfg.tos; if (tos == 1) @@ -2709,6 +2713,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, + [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -3254,6 +3259,12 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_VXLAN_TTL]) conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + if (data[IFLA_VXLAN_TTL_INHERIT]) { + if (changelink) + return -EOPNOTSUPP; + conf->flags |= VXLAN_F_TTL_INHERIT; + } + if (data[IFLA_VXLAN_LABEL]) 
conf->label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & IPV6_FLOWLABEL_MASK; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 33df76405b86..4205dfd19da3 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -270,10 +270,10 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); /* Get BD buffer */ - bd_buffer = dma_alloc_coherent(priv->dev, - (RX_BD_RING_LEN + TX_BD_RING_LEN) * - MAX_RX_BUF_LENGTH, - &bd_dma_addr, GFP_KERNEL); + bd_buffer = dma_zalloc_coherent(priv->dev, + (RX_BD_RING_LEN + TX_BD_RING_LEN) * + MAX_RX_BUF_LENGTH, + &bd_dma_addr, GFP_KERNEL); if (!bd_buffer) { dev_err(priv->dev, "Could not allocate buffer descriptors\n"); @@ -281,9 +281,6 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) goto free_tiptr; } - memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN) - * MAX_RX_BUF_LENGTH); - priv->rx_buffer = bd_buffer; priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index deb5ae21a559..84f071ac0d84 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -4,12 +4,16 @@ config ATH10K select ATH_COMMON select CRC32 select WANT_DEV_COREDUMP + select ATH10K_CE ---help--- This module adds support for wireless adapters based on Atheros IEEE 802.11ac family of chipsets. If you choose to build a module, it'll be called ath10k. +config ATH10K_CE + bool + config ATH10K_PCI tristate "Atheros ath10k PCI support" depends on ATH10K && PCI @@ -36,6 +40,14 @@ config ATH10K_USB This module adds experimental support for USB bus. Currently work in progress and will not fully work. +config ATH10K_SNOC + tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)" + depends on ATH10K && ARCH_QCOM + ---help--- + This module adds support for integrated WCN3990 chip connected + to system NOC(SNOC). Currently work in progress and will not + fully work. 
+ config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 6739ac26fd29..44d60a61b242 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -22,10 +22,10 @@ ath10k_core-$(CONFIG_THERMAL) += thermal.o ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o ath10k_core-$(CONFIG_PM) += wow.o ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o +ath10k_core-$(CONFIG_ATH10K_CE) += ce.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o -ath10k_pci-y += pci.o \ - ce.o +ath10k_pci-y += pci.o ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o @@ -35,5 +35,8 @@ ath10k_sdio-y += sdio.o obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o ath10k_usb-y += usb.o +obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o +ath10k_snoc-y += snoc.o + # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 35d10490f6c3..fa39ffffd34d 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -180,14 +180,11 @@ static void ath10k_ahb_clock_disable(struct ath10k *ar) { struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); - if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) - clk_disable_unprepare(ar_ahb->cmd_clk); + clk_disable_unprepare(ar_ahb->cmd_clk); - if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) - clk_disable_unprepare(ar_ahb->ref_clk); + clk_disable_unprepare(ar_ahb->ref_clk); - if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) - clk_disable_unprepare(ar_ahb->rtc_clk); + clk_disable_unprepare(ar_ahb->rtc_clk); } static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index b9def7bace2f..3b96a43fbda4 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -58,6 +59,74 @@ * the buffer is sent/received. 
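 * The WCN3990/SNOC additions below map each CE id to a fixed shadow
 * write-index register (shadow_reg_support) and optionally keep read
 * indices in DDR (rri_on_ddr) instead of reading target registers.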
*/ +static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 0: + addr = 0x00032000; + break; + case 3: + addr = 0x0003200C; + break; + case 4: + addr = 0x00032010; + break; + case 5: + addr = 0x00032014; + break; + case 7: + addr = 0x0003201C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + return addr; +} + +static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state) +{ + u32 ce_id = ce_state->id; + u32 addr = 0; + + switch (ce_id) { + case 1: + addr = 0x00032034; + break; + case 2: + addr = 0x00032038; + break; + case 5: + addr = 0x00032044; + break; + case 7: + addr = 0x0003204C; + break; + case 8: + addr = 0x00032050; + break; + case 9: + addr = 0x00032054; + break; + case 10: + addr = 0x00032058; + break; + case 11: + addr = 0x0003205C; + break; + default: + ath10k_warn(ar, "invalid CE id: %d", ce_id); + break; + } + + return addr; +} + static inline unsigned int ath10k_set_ring_byte(unsigned int offset, struct ath10k_hw_ce_regs_addr_map *addr_map) @@ -116,11 +185,46 @@ static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar, ar->hw_ce_regs->sr_wr_index_addr); } +static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar, + u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_srri_addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_srri_addr); + + return index; +} + +static inline void +ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value); +} + +static inline void +ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, + struct ath10k_ce_pipe *ce_state, + unsigned int value) +{ + ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value); } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, @@ -181,11 +285,31 @@ static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar, ath10k_set_ring_byte(n, ctrl_regs->dst_ring)); } +static inline + u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) & + CE_DDR_RRI_MASK; +} + static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, u32 ce_ctrl_addr) { - return ath10k_ce_read32(ar, ce_ctrl_addr + - ar->hw_ce_regs->current_drri_addr); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 index; + + if (ar->hw_params.rri_on_ddr && + (ce_state->attr_flags & CE_ATTR_DIS_INTR)) + index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id); + else + index = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->current_drri_addr); + + return index; } static inline void 
ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, @@ -376,8 +500,14 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -395,7 +525,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, struct ath10k_ce_ring *src_ring = ce_state->src_ring; struct ce_desc_64 *desc, sdesc; unsigned int nentries_mask = src_ring->nentries_mask; - unsigned int sw_index = src_ring->sw_index; + unsigned int sw_index; unsigned int write_index = src_ring->write_index; u32 ctrl_addr = ce_state->ctrl_addr; __le32 *addr; @@ -409,6 +539,11 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n", __func__, nbytes, ce_state->src_sz_max); + if (ar->hw_params.rri_on_ddr) + sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id); + else + sw_index = src_ring->sw_index; + if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) <= 0)) { ret = -ENOSR; @@ -464,6 +599,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context, buffer, nbytes, transfer_id, flags); } +EXPORT_SYMBOL(ath10k_ce_send_nolock); void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) { @@ -491,6 +627,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe) src_ring->per_transfer_context[src_ring->write_index] = NULL; } +EXPORT_SYMBOL(__ath10k_ce_send_revert); int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, void *per_transfer_context, @@ -510,6 +647,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_send); int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) { @@ -525,6 +663,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe) return delta; } +EXPORT_SYMBOL(ath10k_ce_num_free_src_entries); int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) { @@ -539,6 +678,7 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe) return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1); } +EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs); static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) @@ -615,13 +755,14 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries) /* Prevent CE ring stuck issue that will occur when ring is full. * Make sure that write index is 1 less than read index. */ - if ((cur_write_idx + nentries) == dest_ring->sw_index) + if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index) nentries -= 1; write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries); ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index); dest_ring->write_index = write_index; } +EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx); int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, dma_addr_t paddr) @@ -636,6 +777,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, return ret; } +EXPORT_SYMBOL(ath10k_ce_rx_post_buf); /* * Guts of ath10k_ce_completed_recv_next. 
@@ -748,6 +890,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state, per_transfer_ctx, nbytesp); } +EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock); int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, @@ -766,6 +909,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_recv_next); static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp, @@ -882,6 +1026,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state, per_transfer_contextp, bufferp); } +EXPORT_SYMBOL(ath10k_ce_revoke_recv_next); /* * Guts of ath10k_ce_completed_send_next. @@ -915,7 +1060,10 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, src_ring->hw_index = read_index; } - read_index = src_ring->hw_index; + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; if (read_index == sw_index) return -EIO; @@ -936,6 +1084,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } +EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); static void ath10k_ce_extract_desc_data(struct ath10k *ar, struct ath10k_ce_ring *src_ring, @@ -1025,6 +1174,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_cancel_send_next); int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, void **per_transfer_contextp) @@ -1040,6 +1190,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state, return ret; } +EXPORT_SYMBOL(ath10k_ce_completed_send_next); /* * Guts of interrupt handler for per-engine interrupts on a particular CE. @@ -1078,6 +1229,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id) spin_unlock_bh(&ce->ce_lock); } +EXPORT_SYMBOL(ath10k_ce_per_engine_service); /* * Handler for per-engine interrupts on ALL active CEs. @@ -1102,6 +1254,7 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar) ath10k_ce_per_engine_service(ar, ce_id); } } +EXPORT_SYMBOL(ath10k_ce_per_engine_service_any); /* * Adjust interrupts for the copy complete handler. 
@@ -1139,6 +1292,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) return 0; } +EXPORT_SYMBOL(ath10k_ce_disable_interrupts); void ath10k_ce_enable_interrupts(struct ath10k *ar) { @@ -1154,6 +1308,7 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar) ath10k_ce_per_engine_handler_adjust(ce_state); } } +EXPORT_SYMBOL(ath10k_ce_enable_interrupts); static int ath10k_ce_init_src_ring(struct ath10k *ar, unsigned int ce_id, @@ -1234,6 +1389,22 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, return 0; } +static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, + struct ath10k_ce_ring *src_ring, + u32 nentries) +{ + src_ring->shadow_base_unaligned = kcalloc(nentries, + sizeof(struct ce_desc), + GFP_KERNEL); + if (!src_ring->shadow_base_unaligned) + return -ENOMEM; + + src_ring->shadow_base = (struct ce_desc *) + PTR_ALIGN(src_ring->shadow_base_unaligned, + CE_DESC_RING_ALIGN); + return 0; +} + static struct ath10k_ce_ring * ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, const struct ce_attr *attr) @@ -1241,6 +1412,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1277,6 +1449,19 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ -1287,6 +1472,7 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, struct ath10k_ce_ring *src_ring; u32 nentries = attr->src_nentries; dma_addr_t base_addr; + int ret; nentries = roundup_pow_of_two(nentries); @@ -1322,6 +1508,19 @@ ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id, ALIGN(src_ring->base_addr_ce_space_unaligned, CE_DESC_RING_ALIGN); + if (ar->hw_params.shadow_reg_support) { + ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); + if (ret) { + dma_free_coherent(ar->dev, + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), + src_ring->base_addr_owner_space_unaligned, + base_addr); + kfree(src_ring); + return ERR_PTR(ret); + } + } + return src_ring; } @@ -1454,6 +1653,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, return 0; } +EXPORT_SYMBOL(ath10k_ce_init_pipe); static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) { @@ -1479,6 +1679,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id) ath10k_ce_deinit_src_ring(ar, ce_id); ath10k_ce_deinit_dest_ring(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_deinit_pipe); static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) { @@ -1486,6 +1687,8 @@ static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc) + @@ -1515,6 +1718,8 @@ static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id) struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; if (ce_state->src_ring) { + if (ar->hw_params.shadow_reg_support) + 
kfree(ce_state->src_ring->shadow_base_unaligned); dma_free_coherent(ar->dev, (ce_state->src_ring->nentries * sizeof(struct ce_desc_64) + @@ -1545,6 +1750,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id) ce_state->ops->ce_free_pipe(ar, ce_id); } +EXPORT_SYMBOL(ath10k_ce_free_pipe); void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) @@ -1584,6 +1790,7 @@ void ath10k_ce_dump_registers(struct ath10k *ar, spin_unlock_bh(&ce->ce_lock); } +EXPORT_SYMBOL(ath10k_ce_dump_registers); static const struct ath10k_ce_ops ce_ops = { .ce_alloc_src_ring = ath10k_ce_alloc_src_ring, @@ -1680,3 +1887,47 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, return 0; } +EXPORT_SYMBOL(ath10k_ce_alloc_pipe); + +void ath10k_ce_alloc_rri(struct ath10k *ar) +{ + int i; + u32 value; + u32 ctrl1_regs; + u32 ce_base_addr; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + ce->vaddr_rri = dma_alloc_coherent(ar->dev, + (CE_COUNT * sizeof(u32)), + &ce->paddr_rri, GFP_KERNEL); + + if (!ce->vaddr_rri) + return; + + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low, + lower_32_bits(ce->paddr_rri)); + ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, + (upper_32_bits(ce->paddr_rri) & + CE_DESC_FLAGS_GET_MASK)); + + for (i = 0; i < CE_COUNT; i++) { + ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; + ce_base_addr = ath10k_ce_base_address(ar, i); + value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs); + value |= ar->hw_ce_regs->upd->mask; + ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value); + } + + memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32)); +} +EXPORT_SYMBOL(ath10k_ce_alloc_rri); + +void ath10k_ce_free_rri(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)), + ce->vaddr_rri, + ce->paddr_rri); +} +EXPORT_SYMBOL(ath10k_ce_free_rri); diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 2c3c8f5e90ea..dbeffaef6024 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -48,6 +49,9 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask #define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb +#define CE_DDR_RRI_MASK GENMASK(15, 0) +#define CE_DDR_DRRI_SHIFT 16 + struct ce_desc { __le32 addr; __le16 nbytes; @@ -113,6 +117,9 @@ struct ath10k_ce_ring { /* CE address space */ u32 base_addr_ce_space; + char *shadow_base_unaligned; + struct ce_desc *shadow_base; + /* keep last */ void *per_transfer_context[0]; }; @@ -153,6 +160,8 @@ struct ath10k_ce { spinlock_t ce_lock; const struct ath10k_bus_ops *bus_ops; struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + u32 *vaddr_rri; + dma_addr_t paddr_rri; }; /*==================Send====================*/ @@ -261,6 +270,8 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar); void ath10k_ce_enable_interrupts(struct ath10k *ar); void ath10k_ce_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data); +void ath10k_ce_alloc_rri(struct ath10k *ar); +void ath10k_ce_free_rri(struct ath10k *ar); /* ce_attr.flags values */ /* Use NonSnooping PCIe accesses? 
*/ @@ -327,6 +338,9 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; } +#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \ + - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS)) + #define CE_SRC_RING_TO_DESC(baddr, idx) \ (&(((struct ce_desc *)baddr)[idx])) @@ -355,14 +369,18 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 +#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0)) static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) { struct ath10k_ce *ce = ath10k_ce_priv(ar); - return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( - ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); + if (!ar->hw_params.per_ce_irq) + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); + else + return CE_INTERRUPT_SUMMARY; } #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 936907227b9e..ad4f6e3c0737 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -90,6 +90,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA988X_HW_2_0_VERSION, @@ -119,6 +121,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9887_HW_1_0_VERSION, @@ -148,6 +153,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -176,6 +184,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_2_1_VERSION, @@ -204,6 +215,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_3_0_VERSION, @@ -232,6 +246,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA6174_HW_3_2_VERSION, @@ -263,6 +280,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA99X0_HW_2_0_DEV_VERSION, @@ -297,6 +317,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { 
.num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9984_HW_1_0_DEV_VERSION, @@ -336,6 +359,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9888_HW_2_0_DEV_VERSION, @@ -374,6 +400,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9377_HW_1_0_DEV_VERSION, @@ -402,6 +431,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA9377_HW_1_1_DEV_VERSION, @@ -432,6 +464,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = QCA4019_HW_1_0_DEV_VERSION, @@ -467,6 +502,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = 0x20, .target_64bit = false, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL, + .per_ce_irq = false, + .shadow_reg_support = false, + .rri_on_ddr = false, }, { .id = WCN3990_HW_1_0_DEV_VERSION, @@ -487,6 +525,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, + .per_ce_irq = true, + .shadow_reg_support = true, + .rri_on_ddr = true, }, }; @@ -1253,14 +1294,61 @@ out: return ret; } +static int ath10k_core_search_bd(struct ath10k *ar, + const char *boardname, + const u8 *data, + size_t len) +{ + size_t ie_len; + struct ath10k_fw_ie *hdr; + int ret = -ENOENT, ie_id; + + while (len > sizeof(struct ath10k_fw_ie)) { + hdr = (struct ath10k_fw_ie *)data; + ie_id = le32_to_cpu(hdr->id); + ie_len = le32_to_cpu(hdr->len); + + len -= sizeof(*hdr); + data = hdr->data; + + if (len < ALIGN(ie_len, 4)) { + ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", + ie_id, ie_len, len); + return -EINVAL; + } + + switch (ie_id) { + case ATH10K_BD_IE_BOARD: + ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, + boardname); + if (ret == -ENOENT) + /* no match found, continue */ + break; + + /* either found or error, so stop searching */ + goto out; + } + + /* jump over the padding */ + ie_len = ALIGN(ie_len, 4); + + len -= ie_len; + data += ie_len; + } + +out: + /* return result of parse_bd_ie_board() or -ENOENT */ + return ret; +} + static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, const char *boardname, + const char *fallback_boardname, const char *filename) { - size_t len, magic_len, ie_len; - struct ath10k_fw_ie *hdr; + size_t len, magic_len; const u8 *data; - int ret, ie_id; + int ret; ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, @@ -1298,69 +1386,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar, data += magic_len; len -= magic_len; - while (len > sizeof(struct ath10k_fw_ie)) { - hdr = (struct ath10k_fw_ie *)data; - 
ie_id = le32_to_cpu(hdr->id); - ie_len = le32_to_cpu(hdr->len); - - len -= sizeof(*hdr); - data = hdr->data; - - if (len < ALIGN(ie_len, 4)) { - ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n", - ie_id, ie_len, len); - ret = -EINVAL; - goto err; - } - - switch (ie_id) { - case ATH10K_BD_IE_BOARD: - ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len, - boardname); - if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') { - /* try default bdf if variant was not found */ - char *s, *v = ",variant="; - char boardname2[100]; - - strlcpy(boardname2, boardname, - sizeof(boardname2)); - - s = strstr(boardname2, v); - if (s) - *s = '\0'; /* strip ",variant=%s" */ - - ret = ath10k_core_parse_bd_ie_board(ar, data, - ie_len, - boardname2); - } - - if (ret == -ENOENT) - /* no match found, continue */ - break; - else if (ret) - /* there was an error, bail out */ - goto err; + /* attempt to find boardname in the IE list */ + ret = ath10k_core_search_bd(ar, boardname, data, len); - /* board data found */ - goto out; - } + /* if we didn't find it and have a fallback name, try that */ + if (ret == -ENOENT && fallback_boardname) + ret = ath10k_core_search_bd(ar, fallback_boardname, data, len); - /* jump over the padding */ - ie_len = ALIGN(ie_len, 4); - - len -= ie_len; - data += ie_len; - } - -out: - if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) { + if (ret == -ENOENT) { ath10k_err(ar, "failed to fetch board data for %s from %s/%s\n", boardname, ar->hw_params.fw.dir, filename); ret = -ENODATA; - goto err; } + if (ret) + goto err; + return 0; err: @@ -1369,12 +1411,12 @@ err: } static int ath10k_core_create_board_name(struct ath10k *ar, char *name, - size_t name_len) + size_t name_len, bool with_variant) { /* strlen(',variant=') + strlen(ar->id.bdf_ext) */ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 }; - if (ar->id.bdf_ext[0] != '\0') + if (with_variant && ar->id.bdf_ext[0] != '\0') scnprintf(variant, sizeof(variant), ",variant=%s", ar->id.bdf_ext); @@ -1400,17 +1442,26 @@ out: static int ath10k_core_fetch_board_file(struct ath10k *ar) { - char boardname[100]; + char boardname[100], fallback_boardname[100]; int ret; - ret = ath10k_core_create_board_name(ar, boardname, sizeof(boardname)); + ret = ath10k_core_create_board_name(ar, boardname, + sizeof(boardname), true); if (ret) { ath10k_err(ar, "failed to create board name: %d", ret); return ret; } + ret = ath10k_core_create_board_name(ar, fallback_boardname, + sizeof(boardname), false); + if (ret) { + ath10k_err(ar, "failed to create fallback board name: %d", ret); + return ret; + } + ar->bd_api = 2; ret = ath10k_core_fetch_board_data_api_n(ar, boardname, + fallback_boardname, ATH10K_BOARD_API2_FILE); if (!ret) goto success; @@ -2472,6 +2523,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar) ar->hw->wiphy->hw_version = target_info.version; break; case ATH10K_BUS_SNOC: + memset(&target_info, 0, sizeof(target_info)); + ret = ath10k_hif_get_target_info(ar, &target_info); + if (ret) { + ath10k_err(ar, "could not get target info (%d)\n", ret); + goto err_power_down; + } + ar->target_version = target_info.version; + ar->hw->wiphy->hw_version = target_info.version; break; default: ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index c17d805d68cc..951dbdd1c9eb 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -44,6 +44,7 @@ #define WO(_f) 
((_f##_OFFSET) >> 2) #define ATH10K_SCAN_ID 0 +#define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */ #define WMI_READY_TIMEOUT (5 * HZ) #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ) #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ) @@ -52,6 +53,8 @@ /* Antenna noise floor */ #define ATH10K_DEFAULT_NOISE_FLOOR -95 +#define ATH10K_INVALID_RSSI 128 + #define ATH10K_MAX_NUM_MGMT_PENDING 128 /* number of failed packets (20 packets with 16 sw reties each) */ @@ -173,6 +176,7 @@ struct ath10k_wmi { struct completion service_ready; struct completion unified_ready; struct completion barrier; + struct completion radar_confirm; wait_queue_head_t tx_credits_wq; DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX); struct wmi_cmd_map *cmd; @@ -375,6 +379,21 @@ struct ath10k_dfs_stats { u32 radar_detected; }; +enum ath10k_radar_confirmation_state { + ATH10K_RADAR_CONFIRMATION_IDLE = 0, + ATH10K_RADAR_CONFIRMATION_INPROGRESS, + ATH10K_RADAR_CONFIRMATION_STOPPED, +}; + +struct ath10k_radar_found_info { + u32 pri_min; + u32 pri_max; + u32 width_min; + u32 width_max; + u32 sidx_min; + u32 sidx_max; +}; + #define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */ struct ath10k_peer { @@ -1107,6 +1126,11 @@ struct ath10k { u32 sta_tid_stats_mask; + /* protected by data_lock */ + enum ath10k_radar_confirmation_state radar_conf_state; + struct ath10k_radar_found_info last_radar_info; + struct work_struct radar_confirmation_work; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index f90cec0ebb1c..4d28063052fe 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -701,6 +701,89 @@ static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { }, }; +static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x60000, + .name = "DRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x98000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOSRAM, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x80000, + .len = 0x6000, + .name = "SOC REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, +}; + static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, @@ -848,6 +931,21 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, + { + .hw_id = QCA9888_HW_2_0_DEV_VERSION, + .region_table = { + .regions = 
qca9984_hw10_mem_regions, + .size = ARRAY_SIZE(qca9984_hw10_mem_regions), + }, + }, + { + .hw_id = QCA99X0_HW_2_0_DEV_VERSION, + .region_table = { + .regions = qca99x0_hw20_mem_regions, + .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), + }, + }, + }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index bac832ce1873..0d98c93a3aba 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -987,13 +987,13 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file, { struct ath10k *ar = file->private_data; int res; - char buf[64]; + char buf[64] = {0}; unsigned int amsdu, ampdu; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = 0; + res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (res <= 0) + return res; res = sscanf(buf, "%u %u", &amsdu, &ampdu); @@ -1043,14 +1043,14 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, { struct ath10k *ar = file->private_data; int ret; - char buf[96]; + char buf[96] = {0}; unsigned int log_level; u64 mask; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = 0; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%llx %u", &mask, &log_level); @@ -1519,7 +1519,13 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats, *len += scnprintf(buf + *len, buf_len - *len, "********************************\n"); *len += scnprintf(buf + *len, buf_len - *len, - "No. Preamble Rate_code tpc_value1 tpc_value2 tpc_value3\n"); + "No. 
Preamble Rate_code "); + + for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++) + *len += scnprintf(buf + *len, buf_len - *len, + "tpc_value%d ", i); + + *len += scnprintf(buf + *len, buf_len - *len, "\n"); for (i = 0; i < tpc_stats->rate_max; i++) { *len += scnprintf(buf + *len, buf_len - *len, diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 8f688f136c22..a63c97e2c50c 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -254,12 +254,12 @@ static ssize_t ath10k_dbg_sta_write_addba(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, buf_size; int ret; - char buf[64]; + char buf[64] = {0}; - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); - - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u", &tid, &buf_size); if (ret != 2) @@ -305,12 +305,12 @@ static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, status; int ret; - char buf[64]; - - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + char buf[64] = {0}; - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u", &tid, &status); if (ret != 2) @@ -355,12 +355,12 @@ static ssize_t ath10k_dbg_sta_write_delba(struct file *file, struct ath10k *ar = arsta->arvif->ar; u32 tid, initiator, reason; int ret; - char buf[64]; - - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); + char buf[64] = {0}; - /* make sure that buf is null terminated */ - buf[sizeof(buf) - 1] = '\0'; + ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, + user_buf, count); + if (ret <= 0) + return ret; ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason); if (ret != 3) diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 6da4e3369c5a..1a59ea0068c2 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -20,13 +20,14 @@ #include <linux/kernel.h> #include "core.h" +#include "bmi.h" #include "debug.h" struct ath10k_hif_sg_item { u16 transfer_id; void *transfer_context; /* NULL = tx completion callback not called */ void *vaddr; /* for debugging mostly */ - u32 paddr; + dma_addr_t paddr; u16 len; }; @@ -93,6 +94,9 @@ struct ath10k_hif_ops { /* fetch calibration data from target eeprom */ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data, size_t *data_len); + + int (*get_target_info)(struct ath10k *ar, + struct bmi_target_info *target_info); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -218,4 +222,13 @@ static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); } +static inline int ath10k_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *tgt_info) +{ + if (!ar->hif.ops->get_target_info) + return -EOPNOTSUPP; + + return ar->hif.ops->get_target_info(ar, tgt_info); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 492dc5b4bbf2..8902720b4e49 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -542,8 +542,14 @@ static 
const char *htc_service_name(enum ath10k_htc_svc_id id) return "NMI Data"; case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG: + return "HTT Data"; + case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG: + return "HTT Data"; case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS: return "RAW"; + case ATH10K_HTC_SVC_ID_HTT_LOG_MSG: + return "PKTLOG"; } return "Unknown"; diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index a2f8814b3e53..34877597dd6a 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -248,6 +248,7 @@ enum ath10k_htc_svc_gid { ATH10K_HTC_SVC_GRP_WMI = 1, ATH10K_HTC_SVC_GRP_NMI = 2, ATH10K_HTC_SVC_GRP_HTT = 3, + ATH10K_LOG_SERVICE_GROUP = 6, ATH10K_HTC_SVC_GRP_TEST = 254, ATH10K_HTC_SVC_GRP_LAST = 255, @@ -273,6 +274,9 @@ enum ath10k_htc_svc_id { ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0), + ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1), + ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2), + ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0), /* raw stream service (i.e. flash, tcmd, calibration apps) */ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0), }; diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 625198dea18b..21a67f82f037 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -257,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; } - status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); + status = ath10k_htt_send_frag_desc_bank_cfg(htt); if (status) return status; - status = htt->tx_ops->htt_send_rx_ring_cfg(htt); + status = ath10k_htt_send_rx_ring_cfg(htt); if (status) { ath10k_warn(ar, "failed to setup rx ring: %d\n", status); diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 8cc2a8b278e4..5d3ff80f3a1f 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
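/*
 * [Editor's sketch] The HTT_DATA2_MSG/HTT_DATA3_MSG/HTT_LOG_MSG service IDs
 * added in the htc.h hunk above are composed with its SVC(group, idx)
 * packing: service group in the upper byte, index in the lower. SVC() is
 * reproduced below for illustration only; the values show why the new
 * PKTLOG service (group 6) cannot collide with the HTT group (3).
 */
#include <stdio.h>

#define SVC(group, idx) (int)(((int)(group) << 8) | (int)(idx))

int main(void)
{
	printf("HTT_DATA_MSG  = 0x%03x\n", SVC(3, 0));	/* 0x300 */
	printf("HTT_DATA2_MSG = 0x%03x\n", SVC(3, 1));	/* 0x301 */
	printf("HTT_DATA3_MSG = 0x%03x\n", SVC(3, 2));	/* 0x302 */
	printf("HTT_LOG_MSG   = 0x%03x\n", SVC(6, 0));	/* 0x600 */
	return 0;
}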
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -127,6 +128,19 @@ struct htt_msdu_ext_desc_64 { | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE) +#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17) +#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19) +#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20) +#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21) + +#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \ + | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64) + enum htt_data_tx_desc_flags0 { HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0, HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1, @@ -533,12 +547,18 @@ struct htt_ver_resp { u8 rsvd0; } __packed; +#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0) + +#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0) + struct htt_mgmt_tx_completion { u8 rsvd0; u8 rsvd1; - u8 rsvd2; + u8 flags; __le32 desc_id; __le32 status; + __le32 ppdu_id; + __le32 info; } __packed; #define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) @@ -1648,6 +1668,7 @@ struct htt_resp { struct htt_tx_done { u16 msdu_id; u16 status; + u8 ack_rssi; }; enum htt_tx_compl_state { @@ -1848,6 +1869,57 @@ struct ath10k_htt_tx_ops { void (*htt_free_txbuff)(struct ath10k_htt *htt); }; +static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_rx_ring_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_rx_ring_cfg(htt); +} + +static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_send_frag_desc_bank_cfg) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt); +} + +static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_frag_desc) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_frag_desc(htt); +} + +static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_frag_desc) + htt->tx_ops->htt_free_frag_desc(htt); +} + +static inline int ath10k_htt_tx(struct ath10k_htt *htt, + enum ath10k_hw_txrx_mode txmode, + struct sk_buff *msdu) +{ + return htt->tx_ops->htt_tx(htt, txmode, msdu); +} + +static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt) +{ + if (!htt->tx_ops->htt_alloc_txbuff) + return -EOPNOTSUPP; + + return htt->tx_ops->htt_alloc_txbuff(htt); +} + +static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt) +{ + if (htt->tx_ops->htt_free_txbuff) + htt->tx_ops->htt_free_txbuff(htt); +} + struct ath10k_htt_rx_ops { size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt); void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr); @@ -1857,6 +1929,43 @@ struct ath10k_htt_rx_ops { void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx); }; +static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_rx_ring_size) + return 0; + + return htt->rx_ops->htt_get_rx_ring_size(htt); +} + +static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt, + void *vaddr) +{ + if 
(htt->rx_ops->htt_config_paddrs_ring) + htt->rx_ops->htt_config_paddrs_ring(htt, vaddr); +} + +static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt, + dma_addr_t paddr, + int idx) +{ + if (htt->rx_ops->htt_set_paddrs_ring) + htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); +} + +static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt) +{ + if (!htt->rx_ops->htt_get_vaddr_ring) + return NULL; + + return htt->rx_ops->htt_get_vaddr_ring(htt); +} + +static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx) +{ + if (htt->rx_ops->htt_reset_paddrs_ring) + htt->rx_ops->htt_reset_paddrs_ring(htt, idx); +} + #define RX_HTT_HDR_STATUS_LEN 64 /* This structure layout is programmed via rx ring setup diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 5e02e26158f6..bd23f6940488 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -25,6 +25,7 @@ #include "mac.h" #include <linux/log2.h> +#include <linux/bitfield.h> /* when under memory pressure rx ring refill may fail and needs a retry */ #define HTT_RX_RING_REFILL_RETRY_MS 50 @@ -181,7 +182,7 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num) rxcb = ATH10K_SKB_RXCB(skb); rxcb->paddr = paddr; htt->rx_ring.netbufs_ring[idx] = skb; - htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx); + ath10k_htt_set_paddrs_ring(htt, paddr, idx); htt->rx_ring.fill_cnt++; if (htt->rx_ring.in_ord_rx) { @@ -286,8 +287,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt) ath10k_htt_rx_ring_free(htt); dma_free_coherent(htt->ar->dev, - htt->rx_ops->htt_get_rx_ring_size(htt), - htt->rx_ops->htt_get_vaddr_ring(htt), + ath10k_htt_get_rx_ring_size(htt), + ath10k_htt_get_vaddr_ring(htt), htt->rx_ring.base_paddr); dma_free_coherent(htt->ar->dev, @@ -314,7 +315,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt) idx = htt->rx_ring.sw_rd_idx.msdu_payld; msdu = htt->rx_ring.netbufs_ring[idx]; htt->rx_ring.netbufs_ring[idx] = NULL; - htt->rx_ops->htt_reset_paddrs_ring(htt, idx); + ath10k_htt_reset_paddrs_ring(htt, idx); idx++; idx &= htt->rx_ring.size_mask; @@ -586,13 +587,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) if (!htt->rx_ring.netbufs_ring) goto err_netbuf; - size = htt->rx_ops->htt_get_rx_ring_size(htt); + size = ath10k_htt_get_rx_ring_size(htt); vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL); if (!vaddr_ring) goto err_dma_ring; - htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring); + ath10k_htt_config_paddrs_ring(htt, vaddr_ring); htt->rx_ring.base_paddr = paddr; vaddr = dma_alloc_coherent(htt->ar->dev, @@ -626,7 +627,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) err_dma_idx: dma_free_coherent(htt->ar->dev, - htt->rx_ops->htt_get_rx_ring_size(htt), + ath10k_htt_get_rx_ring_size(htt), vaddr_ring, htt->rx_ring.base_paddr); err_dma_ring: @@ -2719,12 +2720,21 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: { struct htt_tx_done tx_done = {}; int status = __le32_to_cpu(resp->mgmt_tx_completion.status); + int info = __le32_to_cpu(resp->mgmt_tx_completion.info); tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id); switch (status) { case HTT_MGMT_TX_STATUS_OK: tx_done.status = HTT_TX_COMPL_STATE_ACK; + if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + ar->wmi.svc_map) && + (resp->mgmt_tx_completion.flags & + HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) { + 
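/*
 * [Editor's sketch] The statement continuing below extracts the ACK RSSI
 * from the low byte of the completion's info word: the mask is GENMASK(7, 0)
 * and FIELD_GET(mask, info) reduces to (info & 0xff) >> 0. The same
 * extraction done by hand, outside the kernel helpers:
 */
#include <assert.h>
#include <stdint.h>

#define ACK_RSSI_MASK 0xffu	/* HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK */

static uint8_t ack_rssi_from_info(uint32_t info)
{
	return (uint8_t)(info & ACK_RSSI_MASK);	/* shift is 0 for bits 7:0 */
}

int main(void)
{
	assert(ack_rssi_from_info(0xdead42u) == 0x42);
	return 0;
}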
tx_done.ack_rssi = + FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK, + info); + } break; case HTT_MGMT_TX_STATUS_RETRY: tx_done.status = HTT_TX_COMPL_STATE_NOACK; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index d334b7be1fea..5d8b97a0ccaa 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -443,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; int ret; - ret = htt->tx_ops->htt_alloc_txbuff(htt); + ret = ath10k_htt_alloc_txbuff(htt); if (ret) { ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret); return ret; } - ret = htt->tx_ops->htt_alloc_frag_desc(htt); + ret = ath10k_htt_alloc_frag_desc(htt); if (ret) { ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; @@ -473,10 +473,10 @@ free_txq: ath10k_htt_tx_free_txq(htt); free_frag_desc: - htt->tx_ops->htt_free_frag_desc(htt); + ath10k_htt_free_frag_desc(htt); free_txbuf: - htt->tx_ops->htt_free_txbuff(htt); + ath10k_htt_free_txbuff(htt); return ret; } @@ -530,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt) if (!htt->tx_mem_allocated) return; - htt->tx_ops->htt_free_txbuff(htt); + ath10k_htt_free_txbuff(htt); ath10k_htt_tx_free_txq(htt); - htt->tx_ops->htt_free_frag_desc(htt); + ath10k_htt_free_frag_desc(htt); ath10k_htt_tx_free_txdone_fifo(htt); htt->tx_mem_allocated = false; } @@ -1475,8 +1475,11 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; - if (ar->hw_params.continuous_frag_desc) - ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE; + if (ar->hw_params.continuous_frag_desc) { + memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag)); + ext_desc->tso_flag[3] |= + __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64); + } } /* Prevent firmware from sending up tx inspection requests. There's diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 497ac33e0fbf..677535b3d207 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -310,6 +310,12 @@ static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { .wm_high = &wcn3990_dst_wm_high, }; +static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { + .shift = 19, + .mask = 0x00080000, + .enable = 0x00000000, +}; + const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .sr_base_addr = 0x00000000, .sr_size_addr = 0x00000008, @@ -320,8 +326,6 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .dst_wr_index_addr = 0x00000040, .current_srri_addr = 0x00000044, .current_drri_addr = 0x00000048, - .ddr_addr_for_rri_low = 0x00000004, - .ddr_addr_for_rri_high = 0x00000008, .ce_rri_low = 0x0024C004, .ce_rri_high = 0x0024C008, .host_ie_addr = 0x0000002c, @@ -331,6 +335,7 @@ const struct ath10k_hw_ce_regs wcn3990_ce_regs = { .misc_regs = &wcn3990_misc_reg, .wm_srcr = &wcn3990_wm_src_ring, .wm_dstr = &wcn3990_wm_dst_ring, + .upd = &wcn3990_ctrl1_upd, }; const struct ath10k_hw_values wcn3990_values = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 413b1b4321f7..23467e9fefeb 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 The Linux Foundation. 
All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -84,11 +85,11 @@ enum qca9377_chip_id_rev { QCA9377_HW_1_1_CHIP_ID_REV = 0x1, }; -#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" +#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1" #define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234 -#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0" +#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0" #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin" #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234 @@ -131,7 +132,7 @@ enum qca9377_chip_id_rev { /* WCN3990 1.0 definitions */ #define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990 -#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw3.0" +#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0" #define ATH10K_FW_FILE_BASE "firmware" #define ATH10K_FW_API_MAX 6 @@ -335,6 +336,12 @@ struct ath10k_hw_ce_dst_src_wm_regs { struct ath10k_hw_ce_regs_addr_map *wm_low; struct ath10k_hw_ce_regs_addr_map *wm_high; }; +struct ath10k_hw_ce_ctrl1_upd { + u32 shift; + u32 mask; + u32 enable; +}; + struct ath10k_hw_ce_regs { u32 sr_base_addr; u32 sr_size_addr; @@ -357,7 +364,9 @@ struct ath10k_hw_ce_regs { struct ath10k_hw_ce_cmd_halt *cmd_halt; struct ath10k_hw_ce_host_ie *host_ie; struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr; - struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; }; + struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; + struct ath10k_hw_ce_ctrl1_upd *upd; +}; struct ath10k_hw_values { u32 rtc_state_val_on; @@ -568,6 +577,15 @@ struct ath10k_hw_params { /* Target rx ring fill level */ u32 rx_ring_fill_level; + + /* target supporting per ce IRQ */ + bool per_ce_irq; + + /* target supporting shadow register for ce write */ + bool shadow_reg_support; + + /* target supporting retention restore on ddr */ + bool rri_on_ddr; }; struct htt_rx_desc; diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index bf05a3689558..e9c2fb318c03 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3217,6 +3217,15 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); } +static void ath10k_stop_radar_confirmation(struct ath10k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; + spin_unlock_bh(&ar->data_lock); + + cancel_work_sync(&ar->radar_confirmation_work); +} + /***************/ /* TX handlers */ /***************/ @@ -3598,7 +3607,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar, switch (txpath) { case ATH10K_MAC_TX_HTT: - ret = htt->tx_ops->htt_tx(htt, txmode, skb); + ret = ath10k_htt_tx(htt, txmode, skb); break; case ATH10K_MAC_TX_HTT_MGMT: ret = ath10k_htt_mgmt_tx(htt, skb); @@ -4290,7 +4299,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { ret = ath10k_mac_tx_push_txq(hw, f_txq); - if (ret) + if (ret < 0) break; } if (ret != -ENOENT) @@ -4333,6 +4342,7 @@ void ath10k_halt(struct ath10k *ar) ath10k_scan_finish(ar); ath10k_peer_cleanup_all(ar); + ath10k_stop_radar_confirmation(ar); ath10k_core_stop(ar); ath10k_hif_power_down(ar); @@ -4679,6 +4689,13 @@ static int ath10k_start(struct ieee80211_hw *hw) } } + param = ar->wmi.pdev_param->idle_ps_config; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); + if (ret && ret != -EOPNOTSUPP) { + 
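/*
 * [Editor's sketch] The ath10k_start() hunk continuing right here treats
 * -EOPNOTSUPP from the idle_ps_config pdev parameter as harmless, so only a
 * real WMI failure aborts bringup. The shape of that idiom, with set_param()
 * as a hypothetical stand-in for ath10k_wmi_pdev_set_param():
 */
#include <errno.h>

/* stub: pretend this firmware does not know the parameter */
static int set_param(unsigned int param, unsigned int value)
{
	(void)param; (void)value;
	return -EOPNOTSUPP;
}

static int bringup(void)
{
	int ret = set_param(1 /* idle_ps_config */, 1);

	if (ret && ret != -EOPNOTSUPP)
		return ret;	/* hard failure: abort start */

	return 0;		/* supported, or harmlessly unsupported */
}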
ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret); + goto err_core_stop; + } + __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask); /* @@ -4751,6 +4768,8 @@ static int ath10k_start(struct ieee80211_hw *hw) ath10k_spectral_start(ar); ath10k_thermal_set_throttling(ar); + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; + mutex_unlock(&ar->conf_mutex); return 0; @@ -5668,6 +5687,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, struct wmi_start_scan_arg arg; int ret = 0; int i; + u32 scan_timeout; mutex_lock(&ar->conf_mutex); @@ -5717,12 +5737,34 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; } + if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; + ether_addr_copy(arg.mac_addr.addr, req->mac_addr); + ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); + } + if (req->n_channels) { arg.n_channels = req->n_channels; for (i = 0; i < arg.n_channels; i++) arg.channels[i] = req->channels[i]->center_freq; } + /* if duration is set, default dwell times will be overwritten */ + if (req->duration) { + arg.dwell_time_active = req->duration; + arg.dwell_time_passive = req->duration; + arg.burst_duration_ms = req->duration; + + scan_timeout = min_t(u32, arg.max_rest_time * + (arg.n_channels - 1) + (req->duration + + ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * + arg.n_channels, arg.max_scan_time + 200); + + } else { + /* Add a 200ms margin to account for event/command processing */ + scan_timeout = arg.max_scan_time + 200; + } + ret = ath10k_start_scan(ar, &arg); if (ret) { ath10k_warn(ar, "failed to start hw scan: %d\n", ret); @@ -5731,10 +5773,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); } - /* Add a 200ms margin to account for event/command processing */ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, - msecs_to_jiffies(arg.max_scan_time + - 200)); + msecs_to_jiffies(scan_timeout)); exit: mutex_unlock(&ar->conf_mutex); @@ -8351,6 +8391,8 @@ int ath10k_mac_register(struct ath10k *ar) } wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL); /* * on LL hardware queues are managed entirely by the FW @@ -8433,6 +8475,17 @@ int ath10k_mac_register(struct ath10k *ar) goto err_dfs_detector_exit; } + if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) { + ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr); + if (ret) { + ath10k_err(ar, "failed to set prob req oui: %i\n", ret); + goto err_dfs_detector_exit; + } + + ar->hw->wiphy->features |= + NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; + } + ar->hw->wiphy->cipher_suites = cipher_suites; /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index fd1566cd7d2b..af2cf55c4c1e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1383,8 +1383,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, for (i = 0; i < n_items - 1; i++) { ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); @@ -1401,8 +1401,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k 
*ar, u8 pipe_id, /* `i` is equal to `n_items -1` after for() */ ath10k_dbg(ar, ATH10K_DBG_PCI, - "pci tx item %d paddr 0x%08x len %d n_items %d\n", - i, items[i].paddr, items[i].len, n_items); + "pci tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 545deb6d7af1..ea4075d456fa 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -18,39 +18,41 @@ #ifndef _RX_DESC_H_ #define _RX_DESC_H_ +#include <linux/bitops.h> + enum rx_attention_flags { - RX_ATTENTION_FLAGS_FIRST_MPDU = 1 << 0, - RX_ATTENTION_FLAGS_LAST_MPDU = 1 << 1, - RX_ATTENTION_FLAGS_MCAST_BCAST = 1 << 2, - RX_ATTENTION_FLAGS_PEER_IDX_INVALID = 1 << 3, - RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = 1 << 4, - RX_ATTENTION_FLAGS_POWER_MGMT = 1 << 5, - RX_ATTENTION_FLAGS_NON_QOS = 1 << 6, - RX_ATTENTION_FLAGS_NULL_DATA = 1 << 7, - RX_ATTENTION_FLAGS_MGMT_TYPE = 1 << 8, - RX_ATTENTION_FLAGS_CTRL_TYPE = 1 << 9, - RX_ATTENTION_FLAGS_MORE_DATA = 1 << 10, - RX_ATTENTION_FLAGS_EOSP = 1 << 11, - RX_ATTENTION_FLAGS_U_APSD_TRIGGER = 1 << 12, - RX_ATTENTION_FLAGS_FRAGMENT = 1 << 13, - RX_ATTENTION_FLAGS_ORDER = 1 << 14, - RX_ATTENTION_FLAGS_CLASSIFICATION = 1 << 15, - RX_ATTENTION_FLAGS_OVERFLOW_ERR = 1 << 16, - RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = 1 << 17, - RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = 1 << 18, - RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = 1 << 19, - RX_ATTENTION_FLAGS_SA_IDX_INVALID = 1 << 20, - RX_ATTENTION_FLAGS_DA_IDX_INVALID = 1 << 21, - RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = 1 << 22, - RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = 1 << 23, - RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = 1 << 24, - RX_ATTENTION_FLAGS_DIRECTED = 1 << 25, - RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = 1 << 26, - RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = 1 << 27, - RX_ATTENTION_FLAGS_TKIP_MIC_ERR = 1 << 28, - RX_ATTENTION_FLAGS_DECRYPT_ERR = 1 << 29, - RX_ATTENTION_FLAGS_FCS_ERR = 1 << 30, - RX_ATTENTION_FLAGS_MSDU_DONE = 1 << 31, + RX_ATTENTION_FLAGS_FIRST_MPDU = BIT(0), + RX_ATTENTION_FLAGS_LAST_MPDU = BIT(1), + RX_ATTENTION_FLAGS_MCAST_BCAST = BIT(2), + RX_ATTENTION_FLAGS_PEER_IDX_INVALID = BIT(3), + RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = BIT(4), + RX_ATTENTION_FLAGS_POWER_MGMT = BIT(5), + RX_ATTENTION_FLAGS_NON_QOS = BIT(6), + RX_ATTENTION_FLAGS_NULL_DATA = BIT(7), + RX_ATTENTION_FLAGS_MGMT_TYPE = BIT(8), + RX_ATTENTION_FLAGS_CTRL_TYPE = BIT(9), + RX_ATTENTION_FLAGS_MORE_DATA = BIT(10), + RX_ATTENTION_FLAGS_EOSP = BIT(11), + RX_ATTENTION_FLAGS_U_APSD_TRIGGER = BIT(12), + RX_ATTENTION_FLAGS_FRAGMENT = BIT(13), + RX_ATTENTION_FLAGS_ORDER = BIT(14), + RX_ATTENTION_FLAGS_CLASSIFICATION = BIT(15), + RX_ATTENTION_FLAGS_OVERFLOW_ERR = BIT(16), + RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = BIT(17), + RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = BIT(18), + RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = BIT(19), + RX_ATTENTION_FLAGS_SA_IDX_INVALID = BIT(20), + RX_ATTENTION_FLAGS_DA_IDX_INVALID = BIT(21), + RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = BIT(22), + RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = BIT(23), + RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = BIT(24), + RX_ATTENTION_FLAGS_DIRECTED = BIT(25), + RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = BIT(26), + RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = BIT(27), + RX_ATTENTION_FLAGS_TKIP_MIC_ERR = BIT(28), + RX_ATTENTION_FLAGS_DECRYPT_ERR = BIT(29), + RX_ATTENTION_FLAGS_FCS_ERR = BIT(30), + RX_ATTENTION_FLAGS_MSDU_DONE 
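/*
 * [Editor's sketch] One practical difference behind the BIT() conversions in
 * this file, visible in the flag completed just below: BIT(n) is
 * (1UL << (n)), so bit 31 is formed in an unsigned long, while the old
 * open-coded (1 << 31) shifts into the sign bit of a signed int. Minimal
 * illustration, with BIT() reproduced from the kernel's definition:
 */
#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long msdu_done = BIT(31);	/* well-defined 0x80000000 */

	printf("MSDU_DONE = 0x%lx\n", msdu_done);
	return 0;
}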
= BIT(31), }; struct rx_attention { @@ -254,15 +256,15 @@ enum htt_rx_mpdu_encrypt_type { #define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000 #define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28 -#define RX_MPDU_START_INFO0_FROM_DS (1 << 11) -#define RX_MPDU_START_INFO0_TO_DS (1 << 12) -#define RX_MPDU_START_INFO0_ENCRYPTED (1 << 13) -#define RX_MPDU_START_INFO0_RETRY (1 << 14) -#define RX_MPDU_START_INFO0_TXBF_H_INFO (1 << 15) +#define RX_MPDU_START_INFO0_FROM_DS BIT(11) +#define RX_MPDU_START_INFO0_TO_DS BIT(12) +#define RX_MPDU_START_INFO0_ENCRYPTED BIT(13) +#define RX_MPDU_START_INFO0_RETRY BIT(14) +#define RX_MPDU_START_INFO0_TXBF_H_INFO BIT(15) #define RX_MPDU_START_INFO1_TID_MASK 0xf0000000 #define RX_MPDU_START_INFO1_TID_LSB 28 -#define RX_MPDU_START_INFO1_DIRECTED (1 << 16) +#define RX_MPDU_START_INFO1_DIRECTED BIT(16) struct rx_mpdu_start { __le32 info0; @@ -357,13 +359,13 @@ struct rx_mpdu_start { #define RX_MPDU_END_INFO0_RESERVED_0_LSB 0 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000 #define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16 -#define RX_MPDU_END_INFO0_OVERFLOW_ERR (1 << 13) -#define RX_MPDU_END_INFO0_LAST_MPDU (1 << 14) -#define RX_MPDU_END_INFO0_POST_DELIM_ERR (1 << 15) -#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR (1 << 28) -#define RX_MPDU_END_INFO0_TKIP_MIC_ERR (1 << 29) -#define RX_MPDU_END_INFO0_DECRYPT_ERR (1 << 30) -#define RX_MPDU_END_INFO0_FCS_ERR (1 << 31) +#define RX_MPDU_END_INFO0_OVERFLOW_ERR BIT(13) +#define RX_MPDU_END_INFO0_LAST_MPDU BIT(14) +#define RX_MPDU_END_INFO0_POST_DELIM_ERR BIT(15) +#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR BIT(28) +#define RX_MPDU_END_INFO0_TKIP_MIC_ERR BIT(29) +#define RX_MPDU_END_INFO0_DECRYPT_ERR BIT(30) +#define RX_MPDU_END_INFO0_FCS_ERR BIT(31) struct rx_mpdu_end { __le32 info0; @@ -422,12 +424,12 @@ struct rx_mpdu_end { #define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8 #define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000 #define RX_MSDU_START_INFO1_SA_IDX_LSB 16 -#define RX_MSDU_START_INFO1_IPV4_PROTO (1 << 10) -#define RX_MSDU_START_INFO1_IPV6_PROTO (1 << 11) -#define RX_MSDU_START_INFO1_TCP_PROTO (1 << 12) -#define RX_MSDU_START_INFO1_UDP_PROTO (1 << 13) -#define RX_MSDU_START_INFO1_IP_FRAG (1 << 14) -#define RX_MSDU_START_INFO1_TCP_ONLY_ACK (1 << 15) +#define RX_MSDU_START_INFO1_IPV4_PROTO BIT(10) +#define RX_MSDU_START_INFO1_IPV6_PROTO BIT(11) +#define RX_MSDU_START_INFO1_TCP_PROTO BIT(12) +#define RX_MSDU_START_INFO1_UDP_PROTO BIT(13) +#define RX_MSDU_START_INFO1_IP_FRAG BIT(14) +#define RX_MSDU_START_INFO1_TCP_ONLY_ACK BIT(15) #define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff #define RX_MSDU_START_INFO2_DA_IDX_LSB 0 @@ -568,10 +570,10 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0 -#define RX_MSDU_END_INFO0_FIRST_MSDU (1 << 14) -#define RX_MSDU_END_INFO0_LAST_MSDU (1 << 15) -#define RX_MSDU_END_INFO0_PRE_DELIM_ERR (1 << 30) -#define RX_MSDU_END_INFO0_RESERVED_3B (1 << 31) +#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14) +#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15) +#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30) +#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31) struct rx_msdu_end_common { __le16 ip_hdr_cksum; @@ -691,7 +693,7 @@ struct rx_msdu_end { #define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C #define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D -#define RX_PPDU_START_INFO0_IS_GREENFIELD (1 << 0) +#define RX_PPDU_START_INFO0_IS_GREENFIELD BIT(0) #define 
RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f #define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0 @@ -701,15 +703,15 @@ struct rx_msdu_end { #define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000 #define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24 -#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT (1 << 4) -#define RX_PPDU_START_INFO1_L_SIG_PARITY (1 << 17) +#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT BIT(4) +#define RX_PPDU_START_INFO1_L_SIG_PARITY BIT(17) #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff #define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0 #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff #define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0 -#define RX_PPDU_START_INFO3_TXBF_H_INFO (1 << 24) +#define RX_PPDU_START_INFO3_TXBF_H_INFO BIT(24) #define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff #define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0 @@ -898,14 +900,14 @@ struct rx_ppdu_start { * Reserved: HW should fill with 0, FW should ignore. */ -#define RX_PPDU_END_FLAGS_PHY_ERR (1 << 0) -#define RX_PPDU_END_FLAGS_RX_LOCATION (1 << 1) -#define RX_PPDU_END_FLAGS_TXBF_H_INFO (1 << 2) +#define RX_PPDU_END_FLAGS_PHY_ERR BIT(0) +#define RX_PPDU_END_FLAGS_RX_LOCATION BIT(1) +#define RX_PPDU_END_FLAGS_TXBF_H_INFO BIT(2) #define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff #define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0 -#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24) -#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25) +#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK BIT(24) +#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL BIT(25) #define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc #define RX_PPDU_END_INFO1_PEER_IDX_LSB 2 @@ -1265,9 +1267,9 @@ struct rx_ppdu_end { * to 0. */ -#define FW_RX_DESC_INFO0_DISCARD (1 << 0) -#define FW_RX_DESC_INFO0_FORWARD (1 << 1) -#define FW_RX_DESC_INFO0_INSPECT (1 << 5) +#define FW_RX_DESC_INFO0_DISCARD BIT(0) +#define FW_RX_DESC_INFO0_FORWARD BIT(1) +#define FW_RX_DESC_INFO0_INSPECT BIT(5) #define FW_RX_DESC_INFO0_EXT_MASK 0xC0 #define FW_RX_DESC_INFO0_EXT_LSB 6 diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 03a69e5b1116..d612ce8c9cff 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1957,25 +1957,25 @@ static int ath10k_sdio_probe(struct sdio_func *func, ar_sdio = ath10k_sdio_priv(ar); ar_sdio->irq_data.irq_proc_reg = - kzalloc(sizeof(struct ath10k_sdio_irq_proc_regs), - GFP_KERNEL); + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs), + GFP_KERNEL); if (!ar_sdio->irq_data.irq_proc_reg) { ret = -ENOMEM; goto err_core_destroy; } ar_sdio->irq_data.irq_en_reg = - kzalloc(sizeof(struct ath10k_sdio_irq_enable_regs), - GFP_KERNEL); + devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs), + GFP_KERNEL); if (!ar_sdio->irq_data.irq_en_reg) { ret = -ENOMEM; - goto err_free_proc_reg; + goto err_core_destroy; } - ar_sdio->bmi_buf = kzalloc(BMI_MAX_CMDBUF_SIZE, GFP_KERNEL); + ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL); if (!ar_sdio->bmi_buf) { ret = -ENOMEM; - goto err_free_en_reg; + goto err_core_destroy; } ar_sdio->func = func; @@ -1995,7 +1995,7 @@ static int ath10k_sdio_probe(struct sdio_func *func, ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq"); if (!ar_sdio->workqueue) { ret = -ENOMEM; - goto err_free_bmi_buf; + goto err_core_destroy; } for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++) @@ -2011,7 +2011,7 @@ static int 
ath10k_sdio_probe(struct sdio_func *func, ret = -ENODEV; ath10k_err(ar, "unsupported device id %u (0x%x)\n", dev_id_base, id->device); - goto err_free_bmi_buf; + goto err_free_wq; } ar->id.vendor = id->vendor; @@ -2040,12 +2040,6 @@ static int ath10k_sdio_probe(struct sdio_func *func, err_free_wq: destroy_workqueue(ar_sdio->workqueue); -err_free_bmi_buf: - kfree(ar_sdio->bmi_buf); -err_free_en_reg: - kfree(ar_sdio->irq_data.irq_en_reg); -err_free_proc_reg: - kfree(ar_sdio->irq_data.irq_proc_reg); err_core_destroy: ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c new file mode 100644 index 000000000000..a3a7042fe13a --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -0,0 +1,1413 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include "debug.h" +#include "hif.h" +#include "htc.h" +#include "ce.h" +#include "snoc.h" +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/clk.h> +#define WCN3990_CE_ATTR_FLAGS 0 +#define ATH10K_SNOC_RX_POST_RETRY_MS 50 +#define CE_POLL_PIPE 4 + +static char *const ce_name[] = { + "WLAN_CE_0", + "WLAN_CE_1", + "WLAN_CE_2", + "WLAN_CE_3", + "WLAN_CE_4", + "WLAN_CE_5", + "WLAN_CE_6", + "WLAN_CE_7", + "WLAN_CE_8", + "WLAN_CE_9", + "WLAN_CE_10", + "WLAN_CE_11", +}; + +static struct ath10k_wcn3990_vreg_info vreg_cfg[] = { + {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false}, + {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false}, + {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false}, + {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false}, +}; + +static struct ath10k_wcn3990_clk_info clk_cfg[] = { + {NULL, "cxo_ref_clk_pin", 0, false}, +}; + +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state); +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); + +static const struct ath10k_snoc_drv_priv drv_priv = { + .hw_rev = ATH10K_HW_WCN3990, + .dma_mask = DMA_BIT_MASK(37), +}; + +static struct ce_attr host_ce_config_wlan[] = { + /* CE0: host->target HTC control streams */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 16, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE1: target->host HTT + HTC control */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE2: target->host WMI */ + { + .flags = CE_ATTR_FLAGS, + 
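/*
 * [Editor's sketch] The drv_priv table above gives WCN3990 a 37-bit DMA
 * mask. DMA_BIT_MASK(n) is ((1ULL << (n)) - 1) for n < 64 (reproduced from
 * the kernel definition for illustration), so the device may address the
 * low 128 GiB:
 */
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(37) = 0x%llx\n",
	       (unsigned long long)DMA_BIT_MASK(37));	/* 0x1fffffffff */
	return 0;
}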
.src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 64, + .recv_cb = ath10k_snoc_htc_rx_cb, + }, + + /* CE3: host->target WMI */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 32, + .src_sz_max = 2048, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htc_tx_cb, + }, + + /* CE4: host->target HTT */ + { + .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, + .src_nentries = 256, + .src_sz_max = 256, + .dest_nentries = 0, + .send_cb = ath10k_snoc_htt_tx_cb, + }, + + /* CE5: target->host HTT (ipa_uc->target ) */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 512, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_rx_cb, + }, + + /* CE6: target autonomous hif_memcpy */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 0, + .dest_nentries = 0, + }, + + /* CE7: ce_diag, the Diagnostic Window */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 2, + .src_sz_max = 2048, + .dest_nentries = 2, + }, + + /* CE8: Target to uMC */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 128, + }, + + /* CE9 target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE10: target->host HTT */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, + + /* CE11: target -> host PKTLOG */ + { + .flags = CE_ATTR_FLAGS, + .src_nentries = 0, + .src_sz_max = 2048, + .dest_nentries = 512, + .recv_cb = ath10k_snoc_htt_htc_rx_cb, + }, +}; + +static struct service_to_pipe target_service_to_ce_map_wlan[] = { + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(3), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(0), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + 
__cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(2), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ + __cpu_to_le32(4), + }, + { + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(1), + }, + { /* not used */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), + __cpu_to_le32(PIPEDIR_OUT), + __cpu_to_le32(5), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(9), + }, + { /* in = DL = target -> host */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(10), + }, + { /* in = DL = target -> host pktlog */ + __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG), + __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ + __cpu_to_le32(11), + }, + /* (Additions here) */ + + { /* must be last */ + __cpu_to_le32(0), + __cpu_to_le32(0), + __cpu_to_le32(0), + }, +}; + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + iowrite32(value, ar_snoc->mem + offset); +} + +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + u32 val; + + val = ioread32(ar_snoc->mem + offset); + + return val; +} + +static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = dev_alloc_skb(pipe->buf_sz); + if (!skb) + return -ENOMEM; + + WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); + + paddr = dma_map_single(ar->dev, skb->data, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ar->dev, paddr))) { + ath10k_warn(ar, "failed to dma map snoc rx buf\n"); + dev_kfree_skb_any(skb); + return -EIO; + } + + ATH10K_SKB_RXCB(skb)->paddr = paddr; + + spin_lock_bh(&ce->ce_lock); + ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); + spin_unlock_bh(&ce->ce_lock); + if (ret) { + dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + +static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe) +{ + struct ath10k *ar = pipe->hif_ce_state; + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; + int ret, num; + + if (pipe->buf_sz == 0) + return; + + if (!ce_pipe->dest_ring) + return; + + spin_lock_bh(&ce->ce_lock); + num = __ath10k_ce_rx_num_free_bufs(ce_pipe); + spin_unlock_bh(&ce->ce_lock); + while (num--) { + ret = __ath10k_snoc_rx_post_buf(pipe); + if (ret) { + if (ret == -ENOSPC) + break; + ath10k_warn(ar, "failed to post rx buf: %d\n", ret); + mod_timer(&ar_snoc->rx_post_retry, jiffies + + ATH10K_SNOC_RX_POST_RETRY_MS); + break; + } + } +} + +static void ath10k_snoc_rx_post(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; i < CE_COUNT; i++) + ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]); +} + +static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state, + void (*callback)(struct ath10k *ar, + struct sk_buff *skb)) +{ + struct ath10k *ar = 
ce_state->ar; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id]; + struct sk_buff *skb; + struct sk_buff_head list; + void *transfer_context; + unsigned int nbytes, max_nbytes; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, + &nbytes) == 0) { + skb = transfer_context; + max_nbytes = skb->len + skb_tailroom(skb); + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + max_nbytes, DMA_FROM_DEVICE); + + if (unlikely(max_nbytes < nbytes)) { + ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", + nbytes, max_nbytes); + dev_kfree_skb_any(skb); + continue; + } + + skb_put(skb, nbytes); + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n", + ce_state->id, skb->len); + + callback(ar, skb); + } + + ath10k_snoc_rx_post_pipe(pipe_info); +} + +static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + /* CE4 polling needs to be done whenever CE pipe which transports + * HTT Rx (target->host) is processed. + */ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + + ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); +} + +static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) +{ + skb_pull(skb, sizeof(struct ath10k_htc_hdr)); + ath10k_htt_t2h_msg_handler(ar, skb); +} + +static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state) +{ + ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE); + ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver); +} + +static void ath10k_snoc_rx_replenish_retry(struct timer_list *t) +{ + struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry); + struct ath10k *ar = ar_snoc->ar; + + ath10k_snoc_rx_post(ar); +} + +static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff_head list; + struct sk_buff *skb; + + __skb_queue_head_init(&list); + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + __skb_queue_tail(&list, skb); + } + + while ((skb = __skb_dequeue(&list))) + ath10k_htc_tx_completion_handler(ar, skb); +} + +static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state) +{ + struct ath10k *ar = ce_state->ar; + struct sk_buff *skb; + + while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { + if (!skb) + continue; + + dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, + skb->len, DMA_TO_DEVICE); + ath10k_htt_hif_tx_complete(ar, skb); + } +} + +static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *snoc_pipe; + struct ath10k_ce_pipe *ce_pipe; + int err, i = 0; + + snoc_pipe = &ar_snoc->pipe_info[pipe_id]; + ce_pipe = snoc_pipe->ce_hdl; + spin_lock_bh(&ce->ce_lock); + + for (i = 0; i < n_items - 1; i++) { + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + CE_SEND_FLAG_GATHER); 
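/*
 * [Editor's sketch] ath10k_snoc_hif_tx_sg(), mid-definition here, queues
 * every item but the last with CE_SEND_FLAG_GATHER and sends the final one
 * without it, so the copy engine treats the list as one gather transfer; on
 * failure the already-queued descriptors are reverted. The skeleton of that
 * pattern, with ce_send()/ce_revert_one() as hypothetical stand-ins for the
 * CE primitives:
 */
struct sg_item { void *ctx; unsigned long paddr; int len; };

#define FLAG_GATHER 1

static int ce_send(const struct sg_item *it, int flags)
{ (void)it; (void)flags; return 0; }		/* stub for the sketch */
static void ce_revert_one(void)
{ }						/* stub for the sketch */

static int tx_sg(struct sg_item *items, int n_items)
{
	int i, err;

	for (i = 0; i < n_items - 1; i++) {
		err = ce_send(&items[i], FLAG_GATHER);	/* chain, don't kick */
		if (err)
			goto unwind;
	}

	err = ce_send(&items[i], 0);			/* last entry starts DMA */
	if (err)
		goto unwind;

	return 0;

unwind:
	while (i-- > 0)					/* undo queued entries */
		ce_revert_one();
	return err;
}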
+ if (err) + goto err; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc tx item %d paddr %pad len %d n_items %d\n", + i, &items[i].paddr, items[i].len, n_items); + + err = ath10k_ce_send_nolock(ce_pipe, + items[i].transfer_context, + items[i].paddr, + items[i].len, + items[i].transfer_id, + 0); + if (err) + goto err; + + spin_unlock_bh(&ce->ce_lock); + + return 0; + +err: + for (; i > 0; i--) + __ath10k_ce_send_revert(ce_pipe); + + spin_unlock_bh(&ce->ce_lock); + return err; +} + +static int ath10k_snoc_hif_get_target_info(struct ath10k *ar, + struct bmi_target_info *target_info) +{ + target_info->version = ATH10K_HW_WCN3990; + target_info->type = ATH10K_HW_WCN3990; + + return 0; +} + +static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n"); + + return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl); +} + +static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) +{ + int resources; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n"); + + if (!force) { + resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe); + + if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) + return; + } + ath10k_ce_per_engine_service(ar, pipe); +} + +static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar, + u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n"); + + for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { + entry = &target_service_to_ce_map_wlan[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; + + return 0; +} + +static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n"); + + (void)ath10k_snoc_hif_map_service_to_pipe(ar, + ATH10K_HTC_SVC_ID_RSVD_CTRL, + ul_pipe, dl_pipe); +} + +static inline void ath10k_snoc_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); +} + +static inline void ath10k_snoc_irq_enable(struct ath10k *ar) +{ + ath10k_ce_enable_interrupts(ar); +} + +static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->dest_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + dev_kfree_skb_any(skb); + } +} + +static void 
ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) +{ + struct ath10k_ce_pipe *ce_pipe; + struct ath10k_ce_ring *ce_ring; + struct ath10k_snoc *ar_snoc; + struct sk_buff *skb; + struct ath10k *ar; + int i; + + ar = snoc_pipe->hif_ce_state; + ar_snoc = ath10k_snoc_priv(ar); + ce_pipe = snoc_pipe->ce_hdl; + ce_ring = ce_pipe->src_ring; + + if (!ce_ring) + return; + + if (!snoc_pipe->buf_sz) + return; + + for (i = 0; i < ce_ring->nentries; i++) { + skb = ce_ring->per_transfer_context[i]; + if (!skb) + continue; + + ce_ring->per_transfer_context[i] = NULL; + + ath10k_htc_tx_completion_handler(ar, skb); + } +} + +static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_snoc_pipe *pipe_info; + int pipe_num; + + del_timer_sync(&ar_snoc->rx_post_retry); + for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { + pipe_info = &ar_snoc->pipe_info[pipe_num]; + ath10k_snoc_rx_pipe_cleanup(pipe_info); + ath10k_snoc_tx_pipe_cleanup(pipe_info); + } +} + +static void ath10k_snoc_hif_stop(struct ath10k *ar) +{ + ath10k_snoc_irq_disable(ar); + ath10k_snoc_buffer_cleanup(ar); + napi_synchronize(&ar->napi); + napi_disable(&ar->napi); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); +} + +static int ath10k_snoc_hif_start(struct ath10k *ar) +{ + ath10k_snoc_irq_enable(ar); + ath10k_snoc_rx_post(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + + return 0; +} + +static int ath10k_snoc_init_pipes(struct ath10k *ar) +{ + int i, ret; + + for (i = 0; i < CE_COUNT; i++) { + ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", + i, ret); + return ret; + } + } + + return 0; +} + +static int ath10k_snoc_wlan_enable(struct ath10k *ar) +{ + return 0; +} + +static void ath10k_snoc_wlan_disable(struct ath10k *ar) +{ +} + +static void ath10k_snoc_hif_power_down(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); + + ath10k_snoc_wlan_disable(ar); + ath10k_ce_free_rri(ar); +} + +static int ath10k_snoc_hif_power_up(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", + __func__, ar->state); + + ret = ath10k_snoc_wlan_enable(ar); + if (ret) { + ath10k_err(ar, "failed to enable wcn3990: %d\n", ret); + return ret; + } + + ath10k_ce_alloc_rri(ar); + + ret = ath10k_snoc_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto err_wlan_enable; + } + + napi_enable(&ar->napi); + return 0; + +err_wlan_enable: + ath10k_snoc_wlan_disable(ar); + + return ret; +} + +static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, + .start = ath10k_snoc_hif_start, + .stop = ath10k_snoc_hif_stop, + .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe, + .get_default_pipe = ath10k_snoc_hif_get_default_pipe, + .power_up = ath10k_snoc_hif_power_up, + .power_down = ath10k_snoc_hif_power_down, + .tx_sg = ath10k_snoc_hif_tx_sg, + .send_complete_check = ath10k_snoc_hif_send_complete_check, + .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, + .get_target_info = ath10k_snoc_hif_get_target_info, +}; + +static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { + .read32 = ath10k_snoc_read32, + .write32 = ath10k_snoc_write32, +}; + +int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int i; + + for (i = 0; 
i < CE_COUNT_MAX; i++) { + if (ar_snoc->ce_irqs[i].irq_line == irq) + return i; + } + ath10k_err(ar, "No matching CE id for irq %d\n", irq); + + return -EINVAL; +} + +static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq); + + if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) { + ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, + ce_id); + return IRQ_HANDLED; + } + + ath10k_snoc_irq_disable(ar); + napi_schedule(&ar->napi); + + return IRQ_HANDLED; +} + +static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) +{ + struct ath10k *ar = container_of(ctx, struct ath10k, napi); + int done = 0; + + ath10k_ce_per_engine_service_any(ar); + done = ath10k_htt_txrx_compl_task(ar, budget); + + if (done < budget) { + napi_complete(ctx); + ath10k_snoc_irq_enable(ar); + } + + return done; +} + +void ath10k_snoc_init_napi(struct ath10k *ar) +{ + netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, + ATH10K_NAPI_BUDGET); +} + +static int ath10k_snoc_request_irq(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int irqflags = IRQF_TRIGGER_RISING; + int ret, id; + + for (id = 0; id < CE_COUNT_MAX; id++) { + ret = request_irq(ar_snoc->ce_irqs[id].irq_line, + ath10k_snoc_per_engine_handler, + irqflags, ce_name[id], ar); + if (ret) { + ath10k_err(ar, + "failed to register IRQ handler for CE %d: %d", + id, ret); + goto err_irq; + } + } + + return 0; + +err_irq: + for (id -= 1; id >= 0; id--) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); + + return ret; +} + +static void ath10k_snoc_free_irq(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int id; + + for (id = 0; id < CE_COUNT_MAX; id++) + free_irq(ar_snoc->ce_irqs[id].irq_line, ar); +} + +static int ath10k_snoc_resource_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct platform_device *pdev; + struct resource *res; + int i, ret = 0; + + pdev = ar_snoc->dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase"); + if (!res) { + ath10k_err(ar, "Memory base not found in DT\n"); + return -EINVAL; + } + + ar_snoc->mem_pa = res->start; + ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa, + resource_size(res)); + if (!ar_snoc->mem) { + ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n", + &ar_snoc->mem_pa); + return -EINVAL; + } + + for (i = 0; i < CE_COUNT; i++) { + res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i); + if (!res) { + ath10k_err(ar, "failed to get IRQ%d\n", i); + ret = -ENODEV; + goto out; + } + ar_snoc->ce_irqs[i].irq_line = res->start; + } + +out: + return ret; +} + +static int ath10k_snoc_setup_resource(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_snoc_pipe *pipe; + int i, ret; + + timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0); + spin_lock_init(&ce->ce_lock); + for (i = 0; i < CE_COUNT; i++) { + pipe = &ar_snoc->pipe_info[i]; + pipe->ce_hdl = &ce->ce_states[i]; + pipe->pipe_num = i; + pipe->hif_ce_state = ar; + + ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", + i, ret); + return ret; + } + + pipe->buf_sz = host_ce_config_wlan[i].src_sz_max; + } + ath10k_snoc_init_napi(ar); + + return 0; +} + +static void 
ath10k_snoc_release_resource(struct ath10k *ar) +{ + int i; + + netif_napi_del(&ar->napi); + for (i = 0; i < CE_COUNT; i++) + ath10k_ce_free_pipe(ar, i); +} + +static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_vreg_info *vreg_info) +{ + struct regulator *reg; + int ret = 0; + + reg = devm_regulator_get_optional(dev, vreg_info->name); + + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + + if (ret == -EPROBE_DEFER) { + ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n", + vreg_info->name); + return ret; + } + if (vreg_info->required) { + ath10k_err(ar, "Regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + return ret; + } + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "Optional regulator %s doesn't exist: %d\n", + vreg_info->name, ret); + goto done; + } + + vreg_info->reg = reg; + +done: + ath10k_dbg(ar, ATH10K_DBG_SNOC, + "snoc vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v, + vreg_info->load_ua, vreg_info->settle_delay); + + return 0; +} + +static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, + struct ath10k_wcn3990_clk_info *clk_info) +{ + struct clk *handle; + int ret = 0; + + handle = devm_clk_get(dev, clk_info->name); + if (IS_ERR(handle)) { + ret = PTR_ERR(handle); + if (clk_info->required) { + ath10k_err(ar, "snoc clock %s isn't available: %d\n", + clk_info->name, ret); + return ret; + } + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n", + clk_info->name, + ret); + return 0; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n", + clk_info->name, clk_info->freq); + + clk_info->handle = handle; + + return ret; +} + +static int ath10k_wcn3990_vreg_on(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_vreg_info *vreg_info; + int ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, + vreg_info->max_v); + if (ret) { + ath10k_err(ar, + "failed to set regulator %s voltage-min: %d voltage-max: %d\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v); + goto err_reg_config; + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, + vreg_info->load_ua); + if (ret < 0) { + ath10k_err(ar, + "failed to set regulator %s load: %d\n", + vreg_info->name, + vreg_info->load_ua); + goto err_reg_config; + } + } + + ret = regulator_enable(vreg_info->reg); + if (ret) { + ath10k_err(ar, "failed to enable regulator %s\n", + vreg_info->name); + goto err_reg_config; + } + + if (vreg_info->settle_delay) + udelay(vreg_info->settle_delay); + } + + return 0; + +err_reg_config: + for (; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + regulator_disable(vreg_info->reg); + regulator_set_load(vreg_info->reg, 0); + regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + } + + return ret; +} + +static int ath10k_wcn3990_vreg_off(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_vreg_info *vreg_info; + int ret = 0; + int i; + + for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) { + vreg_info = &ar_snoc->vreg[i]; + + if (!vreg_info->reg) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n", + vreg_info->name); + + ret =
regulator_disable(vreg_info->reg); + if (ret) + ath10k_err(ar, "failed to disable regulator %s\n", + vreg_info->name); + + ret = regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + ath10k_err(ar, "failed to set load %s\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_v); + if (ret) + ath10k_err(ar, "failed to set voltage %s\n", + vreg_info->name); + } + + return ret; +} + +static int ath10k_wcn3990_clk_init(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_clk_info *clk_info; + int ret = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n", + clk_info->name); + + if (clk_info->freq) { + ret = clk_set_rate(clk_info->handle, clk_info->freq); + + if (ret) { + ath10k_err(ar, "failed to set clock %s freq %u\n", + clk_info->name, clk_info->freq); + goto err_clock_config; + } + } + + ret = clk_prepare_enable(clk_info->handle); + if (ret) { + ath10k_err(ar, "failed to enable clock %s\n", + clk_info->name); + goto err_clock_config; + } + } + + return 0; + +err_clock_config: + for (; i >= 0; i--) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + clk_disable_unprepare(clk_info->handle); + } + + return ret; +} + +static int ath10k_wcn3990_clk_deinit(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + struct ath10k_wcn3990_clk_info *clk_info; + int i; + + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + clk_info = &ar_snoc->clk[i]; + + if (!clk_info->handle) + continue; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n", + clk_info->name); + + clk_disable_unprepare(clk_info->handle); + } + + return 0; +} + +static int ath10k_hw_power_on(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n"); + + ret = ath10k_wcn3990_vreg_on(ar); + if (ret) + return ret; + + ret = ath10k_wcn3990_clk_init(ar); + if (ret) + goto vreg_off; + + return ret; + +vreg_off: + ath10k_wcn3990_vreg_off(ar); + return ret; +} + +static int ath10k_hw_power_off(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n"); + + ath10k_wcn3990_clk_deinit(ar); + + ret = ath10k_wcn3990_vreg_off(ar); + + return ret; +} + +static const struct of_device_id ath10k_snoc_dt_match[] = { + { .compatible = "qcom,wcn3990-wifi", + .data = &drv_priv, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match); + +static int ath10k_snoc_probe(struct platform_device *pdev) +{ + const struct ath10k_snoc_drv_priv *drv_data; + const struct of_device_id *of_id; + struct ath10k_snoc *ar_snoc; + struct device *dev; + struct ath10k *ar; + int ret; + u32 i; + + of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "failed to find matching device tree id\n"); + return -EINVAL; + } + + drv_data = of_id->data; + dev = &pdev->dev; + + ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask); + if (ret) { + dev_err(dev, "failed to set dma mask: %d", ret); + return ret; + } + + ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC, + drv_data->hw_rev, &ath10k_snoc_hif_ops); + if (!ar) { + dev_err(dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ar_snoc = ath10k_snoc_priv(ar); + ar_snoc->dev = pdev; + platform_set_drvdata(pdev, ar); + ar_snoc->ar = ar; + ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops; + ar->ce_priv = &ar_snoc->ce; + + 
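/* Probe acquires resources in dependency order: DT memory and IRQ + * resources first, then CE pipes and NAPI, IRQ handlers, and finally + * regulators and clocks; failures unwind via the err_* labels below. + */ +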
ret = ath10k_snoc_resource_init(ar); + if (ret) { + ath10k_warn(ar, "failed to initialize resource: %d\n", ret); + goto err_core_destroy; + } + + ret = ath10k_snoc_setup_resource(ar); + if (ret) { + ath10k_warn(ar, "failed to setup resource: %d\n", ret); + goto err_core_destroy; + } + ret = ath10k_snoc_request_irq(ar); + if (ret) { + ath10k_warn(ar, "failed to request irqs: %d\n", ret); + goto err_release_resource; + } + + ar_snoc->vreg = vreg_cfg; + for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) { + ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]); + if (ret) + goto err_free_irq; + } + + ar_snoc->clk = clk_cfg; + for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) { + ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]); + if (ret) + goto err_free_irq; + } + + ret = ath10k_hw_power_on(ar); + if (ret) { + ath10k_err(ar, "failed to power on device: %d\n", ret); + goto err_free_irq; + } + + ret = ath10k_core_register(ar, drv_data->hw_rev); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_hw_power_off; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); + ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); + + return 0; + +err_hw_power_off: + ath10k_hw_power_off(ar); + +err_free_irq: + ath10k_snoc_free_irq(ar); + +err_release_resource: + ath10k_snoc_release_resource(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + + return ret; +} + +static int ath10k_snoc_remove(struct platform_device *pdev) +{ + struct ath10k *ar = platform_get_drvdata(pdev); + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + ath10k_core_unregister(ar); + ath10k_hw_power_off(ar); + ath10k_snoc_free_irq(ar); + ath10k_snoc_release_resource(ar); + ath10k_core_destroy(ar); + + return 0; +} + +static struct platform_driver ath10k_snoc_driver = { + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, + .driver = { + .name = "ath10k_snoc", + .of_match_table = ath10k_snoc_dt_match, + }, +}; + +static int __init ath10k_snoc_init(void) +{ + int ret; + + ret = platform_driver_register(&ath10k_snoc_driver); + if (ret) + pr_err("failed to register ath10k snoc driver: %d\n", + ret); + + return ret; +} +module_init(ath10k_snoc_init); + +static void __exit ath10k_snoc_exit(void) +{ + platform_driver_unregister(&ath10k_snoc_driver); +} +module_exit(ath10k_snoc_exit); + +MODULE_AUTHOR("Qualcomm"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices"); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h new file mode 100644 index 000000000000..05dc98f46ccd --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#ifndef _SNOC_H_ +#define _SNOC_H_ + +#include "hw.h" +#include "ce.h" +#include "pci.h" + +struct ath10k_snoc_drv_priv { + enum ath10k_hw_rev hw_rev; + u64 dma_mask; +}; + +struct snoc_state { + u32 pipe_cfg_addr; + u32 svc_to_pipe_map; +}; + +struct ath10k_snoc_pipe { + struct ath10k_ce_pipe *ce_hdl; + u8 pipe_num; + struct ath10k *hif_ce_state; + size_t buf_sz; + /* protect ce info */ + spinlock_t pipe_lock; + struct ath10k_snoc *ar_snoc; +}; + +struct ath10k_snoc_target_info { + u32 target_version; + u32 target_type; + u32 target_revision; + u32 soc_version; +}; + +struct ath10k_snoc_ce_irq { + u32 irq_line; +}; + +struct ath10k_wcn3990_vreg_info { + struct regulator *reg; + const char *name; + u32 min_v; + u32 max_v; + u32 load_ua; + unsigned long settle_delay; + bool required; +}; + +struct ath10k_wcn3990_clk_info { + struct clk *handle; + const char *name; + u32 freq; + bool required; +}; + +struct ath10k_snoc { + struct platform_device *dev; + struct ath10k *ar; + void __iomem *mem; + dma_addr_t mem_pa; + struct ath10k_snoc_target_info target_info; + size_t mem_len; + struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX]; + struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX]; + struct ath10k_ce ce; + struct timer_list rx_post_retry; + struct ath10k_wcn3990_vreg_info *vreg; + struct ath10k_wcn3990_clk_info *clk; +}; + +static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) +{ + return (struct ath10k_snoc *)ar->drv_priv; +} + +void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); +u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); + +#endif /* _SNOC_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 70e23bbf7171..cda164f6e9f6 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -119,6 +120,13 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, info->flags &= ~IEEE80211_TX_STAT_ACK; } + if (tx_done->status == HTT_TX_COMPL_STATE_ACK && + tx_done->ack_rssi != ATH10K_INVALID_RSSI) { + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + + tx_done->ack_rssi; + info->status.is_valid_ack_signal = true; + } + ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index c35e45340b4f..5ecce04005d2 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -25,6 +25,7 @@ struct sk_buff; struct wmi_ops { void (*rx)(struct ath10k *ar, struct sk_buff *skb); void (*map_svc)(const __le32 *in, unsigned long *out, size_t len); + void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len); int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb, struct wmi_scan_ev_arg *arg); @@ -54,6 +55,11 @@ struct wmi_ops { struct wmi_wow_ev_arg *arg); int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb, struct wmi_echo_ev_arg *arg); + int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg); + int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg); + enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar); struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); @@ -115,6 +121,8 @@ struct wmi_ops { u32 value); struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, const struct wmi_scan_chan_list_arg *arg); + struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar, + u32 prob_req_oui); struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id, const void *bcn, size_t bcn_len, u32 bcn_paddr, bool dtim_zero, @@ -182,6 +190,9 @@ struct wmi_ops { const struct wmi_tdls_peer_update_cmd_arg *arg, const struct wmi_tdls_peer_capab_arg *cap, const struct wmi_channel_arg *chan); + struct sk_buff *(*gen_radar_found) + (struct ath10k *ar, + const struct ath10k_radar_found_info *arg); struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable); struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar, u32 param); @@ -230,6 +241,17 @@ ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out, } static inline int +ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out, + size_t len) +{ + if (!ar->wmi.ops->map_svc_ext) + return -EOPNOTSUPP; + + ar->wmi.ops->map_svc_ext(in, out, len); + return 0; +} + +static inline int ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb, struct wmi_scan_ev_arg *arg) { @@ -330,6 +352,15 @@ ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb, } static inline int +ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_svc_avail) + return -EOPNOTSUPP; + return ar->wmi.ops->pull_svc_avail(ar, skb, arg); +} + +static inline int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb, struct ath10k_fw_stats *stats) { @@ -369,6 +400,16 @@ ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_echo_ev(ar, skb, arg); } +static inline int +ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + if 
(!ar->wmi.ops->pull_dfs_status_ev) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg); +} + static inline enum wmi_txbf_conf ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar) { @@ -891,6 +932,26 @@ ath10k_wmi_scan_chan_list(struct ath10k *ar, } static inline int +ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN]) +{ + struct sk_buff *skb; + u32 prob_req_oui; + + prob_req_oui = (((u32)mac_addr[0]) << 16) | + (((u32)mac_addr[1]) << 8) | mac_addr[2]; + + if (!ar->wmi.ops->gen_scan_prob_req_oui) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->scan_prob_req_oui_cmdid); +} + +static inline int ath10k_wmi_peer_assoc(struct ath10k *ar, const struct wmi_peer_assoc_complete_arg *arg) { @@ -1465,4 +1526,21 @@ ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param) ar->wmi.cmd->pdev_get_tpc_table_cmdid); } +static inline int +ath10k_wmi_report_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_radar_found) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_radar_found(ar, arg); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->radar_found_cmdid); +} + #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 9d1b0a459069..2e34a1fc5ba6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -594,6 +594,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_READY_EVENTID: ath10k_wmi_event_ready(ar, skb); break; + case WMI_TLV_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); + break; case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID: ath10k_wmi_tlv_event_bcn_tx_status(ar, skb); break; @@ -1117,6 +1120,39 @@ static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, return 0; } +static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_svc_avail_ev_arg *arg = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: + arg->service_map_ext_len = *(__le32 *)ptr; + arg->service_map_ext = ptr + sizeof(__le32); + return 0; + default: + break; + } + return -EPROTO; +} + +static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_svc_avail_ev_arg *arg) +{ + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_svc_avail_parse, arg); + + if (ret) { + ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret); + return ret; + } + + return 0; +} + static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, struct ath10k_fw_stats_vdev *dst) { @@ -1600,6 +1636,8 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar, cmd->num_bssids = __cpu_to_le32(arg->n_bssids); cmd->ie_len = __cpu_to_le32(arg->ie_len); cmd->num_probes = __cpu_to_le32(3); + ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr); + ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr); /* FIXME: There are some scan flag inconsistencies across firmwares, * e.g. WMI-TLV inverts the logic behind the following flag. 
@@ -2447,6 +2485,27 @@ ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, } static struct sk_buff * +ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui) +{ + struct wmi_scan_prob_req_oui_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + cmd->prob_req_oui = __cpu_to_le32(prob_req_oui); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n"); + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn, size_t bcn_len, u32 bcn_paddr, bool dtim_zero, @@ -3077,6 +3136,37 @@ ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar, } static struct sk_buff * +ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period, + u32 duration, u32 next_offset, + u32 enabled) +{ + struct wmi_tlv_set_quiet_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + tlv = (void *)skb->data; + tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD); + tlv->len = __cpu_to_le16(sizeof(*cmd)); + cmd = (void *)tlv->value; + + /* vdev_id is not in use, set to 0 */ + cmd->vdev_id = __cpu_to_le32(0); + cmd->period = __cpu_to_le32(period); + cmd->duration = __cpu_to_le32(duration); + cmd->next_start = __cpu_to_le32(next_offset); + cmd->enabled = __cpu_to_le32(enabled); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv quiet param: period %u duration %u enabled %d\n", + period, duration, enabled); + return skb; +} + +static struct sk_buff * ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) { struct wmi_tlv_wow_enable_cmd *cmd; @@ -3416,6 +3506,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = { .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID, .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID, @@ -3740,6 +3831,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { static const struct wmi_ops wmi_tlv_ops = { .rx = ath10k_wmi_tlv_op_rx, .map_svc = wmi_tlv_svc_map, + .map_svc_ext = wmi_tlv_svc_map_ext, .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, @@ -3751,6 +3843,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, + .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail, .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, @@ -3782,6 +3875,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps, .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps, .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list, + .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui, .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, .gen_request_stats = 
ath10k_wmi_tlv_op_gen_request_stats, @@ -3791,7 +3885,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, - /* .gen_pdev_set_quiet_mode not implemented */ + .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode, .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, /* .gen_addba_clear_resp not implemented */ /* .gen_addba_send not implemented */ diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index fa3773ec7c68..3e1e340cd834 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1,6 +1,7 @@ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -295,6 +296,7 @@ enum wmi_tlv_cmd_id { enum wmi_tlv_event_id { WMI_TLV_SERVICE_READY_EVENTID = 0x1, WMI_TLV_READY_EVENTID, + WMI_TLV_SERVICE_AVAILABLE_EVENTID, WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN), WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV), WMI_TLV_CHAN_INFO_EVENTID, @@ -949,6 +951,275 @@ enum wmi_tlv_tag { WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE, WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD, WMI_TLV_TAG_STRUCT_MGMT_TX_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD, + WMI_TLV_TAG_STRUCT_LRO_INFO_CMD, + WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT, + WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD, + WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD, + WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT, + WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD, + WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_SCPC_EVENT, + WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST, + WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD, + WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD, + WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT, + WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM, + WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM, + WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD, + WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD, + 
WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD, + WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD, + WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP, + WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT, + WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT, + WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT, + WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT, + WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT, + WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT, + WMI_TLV_TAG_STRUCT_ATF_PEER_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD, + WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES, + WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN, + WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO, + WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM, + WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM, + WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU, + WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD, + WMI_TLV_TAG_STRUCT_ROAM_SET_MBO, + WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT, + WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT, + WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ, + WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ, + WMI_TLV_TAG_STRUCT_NDP_END_REQ, + WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT, + WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT, + WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT, + WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER, + WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD, + WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES, + WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES, + WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS, + WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT, + WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES, + WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD, + WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV, + WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG, + WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD, + WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI, + WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST, + 
WMI_TLV_TAG_STRUCT_BWF_PEER_INFO, + WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD, + WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD, + WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT, + WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS, + WMI_TLV_TAG_STRUCT_RSSI_STATS, + WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD, + WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD, + WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD, + WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD, + WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD, + WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD, + WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID, + WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM, + WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO, + WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD, + WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT, + WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD, + WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS, + WMI_TLV_TAG_STRUCT_TX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS, + WMI_TLV_TAG_STRUCT_RX_STATS, + WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS, + WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH, + WMI_TLV_TAG_STRUCT_TX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_RX_STATS_THRESH, + WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD, + WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD, + WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT, + WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO, + WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT, + WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD, + WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD, + WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT, + WMI_TLV_TAG_STRUCT_VENDOR_OUI, + WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD, + WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO, + WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT, + WMI_TLV_TAG_STRUCT_PKGID_EVENT, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS, + WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT, + WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT, + WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD, + WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD, + WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT, + WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS, + WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT, + 
WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD, + WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD, + WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF, + WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT, + WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED, + WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD, + WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT, + WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO, + WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY, + WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD, + WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT, + WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS, + WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM, + WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT, + WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR, + WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT, + WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD, + WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT, + WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD, + WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS, + WMI_TLV_TAG_STRUCT_HE_RATE_SET, + WMI_TLV_TAG_STRUCT_CONGESTION_STATS, + WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE, + WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV, + WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID, + WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO, + WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT, + WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO, + WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ, + WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP, + WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE, + WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM, + WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD, + WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM, + WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD, + WMI_TLV_TAG_STRUCT_PMK_CACHE, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD, + WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM, + WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM, + WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD, + WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD, + WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT, + WMI_TLV_TAG_STRUCT_BTM_CONFIG, + WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM, + WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST, + WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT, + WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM, + WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION, + WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT, + WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT, + WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD, + WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD, + WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT, WMI_TLV_TAG_MAX }; @@ -1068,16 +1339,74 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_WLAN_STATS_REPORT, 
WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT, WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD, + WMI_TLV_SERVICE_RCPI_SUPPORT, + WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT, + WMI_TLV_SERVICE_PEER_STATS_INFO, + WMI_TLV_SERVICE_REGULATORY_DB, + WMI_TLV_SERVICE_11D_OFFLOAD, + WMI_TLV_SERVICE_HW_DATA_FILTERING, + WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART, + WMI_TLV_SERVICE_PKT_ROUTING, + WMI_TLV_SERVICE_CHECK_CAL_VERSION, + WMI_TLV_SERVICE_OFFCHAN_TX_WMI, + WMI_TLV_SERVICE_8SS_TX_BFEE, + WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_TLV_SERVICE_ACK_TIMEOUT, + WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64, + WMI_TLV_MAX_SERVICE = 128, +
+/* NOTE: + * The above service flags are delivered in the wmi_service_bitmap field + * of the WMI_TLV_SERVICE_READY_EVENT message. + * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT + * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's + * wmi_service_bitmap field. + * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the + * WMI_TLV_SERVICE_READY_EVENT message. + */ + + WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128, + WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT, + WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT, + WMI_TLV_SERVICE_FILS_SUPPORT, + WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD, + WMI_TLV_SERVICE_WLAN_DHCP_RENEW, + WMI_TLV_SERVICE_MAWC_SUPPORT, + WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG, + WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT, + WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT, + WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT, + WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT, + WMI_TLV_SERVICE_THERM_THROT, + WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT, + WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN, + WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143, + WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144, + WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145, + WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146, + WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147, + WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148, + WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149, + WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150, + WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151, + WMI_TLV_SERVICE_STA_TWT = 152, + WMI_TLV_SERVICE_AP_TWT = 153, + WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154, + WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155, + + WMI_TLV_MAX_EXT_SERVICE = 256, }; -#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ - ((svc_id) < (len) && \ - __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ - BIT((svc_id) % (sizeof(u32)))) +#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \ + (svc_id) >= (len) && \ + __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \ + BIT(((((svc_id) - (len)) % 32) & 0x1f))) #define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ + if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \ + (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \ __set_bit(y, out); \ } while (0) @@ -1228,6 +1557,14 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len) WMI_SERVICE_MGMT_TX_WMI, len); } +static inline void +wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len) +{ + SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, + WMI_SERVICE_SPOOF_MAC_SUPPORT, + WMI_TLV_MAX_SERVICE); +} + #undef SVCMAP struct wmi_tlv { @@ -1370,6 +1707,15 @@ struct wmi_tlv_scan_chan_list_cmd { __le32 num_scan_chans; } __packed; +struct wmi_scan_prob_req_oui_cmd { +/* OUI to be used in Probe Request frame when random MAC address is + * requested as part of scan parameters.
This is applied to both FW internal + * scans and host initiated scans. Host can request for random MAC address + * with WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag. + */ + __le32 prob_req_oui; +} __packed; + struct wmi_tlv_start_scan_cmd { struct wmi_start_scan_common common; __le32 burst_duration_ms; @@ -1378,6 +1724,8 @@ struct wmi_tlv_start_scan_cmd { __le32 num_ssids; __le32 ie_len; __le32 num_probes; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; } __packed; struct wmi_tlv_vdev_start_cmd { @@ -1605,6 +1953,21 @@ struct wmi_tlv_wow_add_del_event_cmd { __le32 event_bitmap; } __packed; +/* Command to set/unset chip in quiet mode */ +struct wmi_tlv_set_quiet_cmd { + __le32 vdev_id; + + /* in TUs */ + __le32 period; + + /* in TUs */ + __le32 duration; + + /* offset in TUs */ + __le32 next_start; + __le32 enabled; +} __packed; + struct wmi_tlv_wow_enable_cmd { __le32 enable; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c5e1ca5945db..f97ab795cf2e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -34,6 +34,7 @@ #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ) +#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6) /* MAIN WMI cmd track */ static struct wmi_cmd_map wmi_cmd_map = { @@ -42,6 +43,7 @@ static struct wmi_cmd_map wmi_cmd_map = { .stop_scan_cmdid = WMI_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID, @@ -198,6 +200,7 @@ static struct wmi_cmd_map wmi_cmd_map = { .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.X WMI cmd track */ @@ -207,6 +210,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID, @@ -365,6 +369,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = { .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED, .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.2.4 WMI cmd track */ @@ -374,6 +379,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, @@ -532,6 +538,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { .pdev_bss_chan_info_request_cmdid = WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; /* 10.4 WMI cmd track */ @@ -541,6 +548,7 @@ static struct wmi_cmd_map 
wmi_10_4_cmd_map = { .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID, @@ -741,6 +749,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID, .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID, .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID, }; /* MAIN WMI VDEV param map */ @@ -1338,6 +1347,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID, .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID, .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED, + .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED, .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID, .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID, .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID, @@ -1485,6 +1495,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = { .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED, .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, + .radar_found_cmdid = WMI_CMD_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { @@ -2313,7 +2324,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) u32 phy_mode; u32 snr; u32 rate; - u32 buf_len; u16 fc; int ret; @@ -2325,7 +2335,6 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) } channel = __le32_to_cpu(arg.channel); - buf_len = __le32_to_cpu(arg.buf_len); rx_status = __le32_to_cpu(arg.status); snr = __le32_to_cpu(arg.snr); phy_mode = __le32_to_cpu(arg.phy_mode); @@ -2735,14 +2744,13 @@ static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats *stats) { const struct wmi_stats_event *ev = (void *)skb->data; - u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + u32 num_pdev_stats, num_peer_stats; int i; if (!skb_pull(skb, sizeof(*ev))) return -EPROTO; num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -2790,14 +2798,13 @@ static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar, struct ath10k_fw_stats *stats) { const struct wmi_stats_event *ev = (void *)skb->data; - u32 num_pdev_stats, num_vdev_stats, num_peer_stats; + u32 num_pdev_stats, num_peer_stats; int i; if (!skb_pull(skb, sizeof(*ev))) return -EPROTO; num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -2851,7 +2858,6 @@ static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, const struct wmi_10_2_stats_event *ev = (void *)skb->data; u32 num_pdev_stats; u32 num_pdev_ext_stats; - u32 num_vdev_stats; u32 num_peer_stats; int i; @@ -2860,7 +2866,6 @@ static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar, num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < 
num_pdev_stats; i++) { @@ -2930,7 +2935,6 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, const struct wmi_10_2_stats_event *ev = (void *)skb->data; u32 num_pdev_stats; u32 num_pdev_ext_stats; - u32 num_vdev_stats; u32 num_peer_stats; int i; @@ -2939,7 +2943,6 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); - num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); for (i = 0; i < num_pdev_stats; i++) { @@ -3686,6 +3689,68 @@ void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb) ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n"); } +static void ath10k_radar_detected(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); + ATH10K_DFS_STAT_INC(ar, radar_detected); + + /* Control radar events reporting in debugfs file + * dfs_block_radar_events + */ + if (ar->dfs_block_radar_events) + ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); + else + ieee80211_radar_detected(ar->hw); +} + +static void ath10k_radar_confirmation_work(struct work_struct *work) +{ + struct ath10k *ar = container_of(work, struct ath10k, + radar_confirmation_work); + struct ath10k_radar_found_info radar_info; + int ret, time_left; + + reinit_completion(&ar->wmi.radar_confirm); + + spin_lock_bh(&ar->data_lock); + memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info)); + spin_unlock_bh(&ar->data_lock); + + ret = ath10k_wmi_report_radar_found(ar, &radar_info); + if (ret) { + ath10k_warn(ar, "failed to send radar found %d\n", ret); + goto wait_complete; + } + + time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm, + ATH10K_WMI_DFS_CONF_TIMEOUT_HZ); + if (time_left) { + /* DFS Confirmation status event received and + * necessary action completed. + */ + goto wait_complete; + } else { + /* DFS Confirmation event not received from FW. Considering this + * as a real radar. + */ + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs confirmation not received from fw, considering as radar\n"); + goto radar_detected; + } + +radar_detected: + ath10k_radar_detected(ar); + + /* Reset state to allow sending confirmation on consecutive radar + * detections, unless radar confirmation is disabled/stopped.
+ */ +wait_complete: + spin_lock_bh(&ar->data_lock); + if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED) + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; + spin_unlock_bh(&ar->data_lock); +} + static void ath10k_dfs_radar_report(struct ath10k *ar, struct wmi_phyerr_ev_arg *phyerr, const struct phyerr_radar_report *rr, @@ -3694,8 +3759,10 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, u32 reg0, reg1, tsf32l; struct ieee80211_channel *ch; struct pulse_event pe; + struct radar_detector_specs rs; u64 tsf64; u8 rssi, width; + struct ath10k_radar_found_info *radar_info; reg0 = __le32_to_cpu(rr->reg0); reg1 = __le32_to_cpu(rr->reg1); @@ -3760,25 +3827,46 @@ static void ath10k_dfs_radar_report(struct ath10k *ar, ATH10K_DFS_STAT_INC(ar, pulses_detected); - if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) { + if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) { ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs no pulse pattern detected, yet\n"); return; } -radar_detected: - ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n"); - ATH10K_DFS_STAT_INC(ar, radar_detected); + if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) && + ar->dfs_detector->region == NL80211_DFS_FCC) { + /* Consecutive radar indications need not be + * sent to the firmware until we get confirmation + * for the previously detected radar. + */ + spin_lock_bh(&ar->data_lock); + if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) { + spin_unlock_bh(&ar->data_lock); + return; + } + ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS; + radar_info = &ar->last_radar_info; - /* Control radar events reporting in debugfs file - * dfs_block_radar_events - */ - if (ar->dfs_block_radar_events) { - ath10k_info(ar, "DFS Radar detected, but ignored as requested\n"); + radar_info->pri_min = rs.pri_min; + radar_info->pri_max = rs.pri_max; + radar_info->width_min = rs.width_min; + radar_info->width_max = rs.width_max; + /* TODO: Find sidx_min and sidx_max */ + radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX); + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + radar_info->pri_min, radar_info->pri_max, + radar_info->width_min, radar_info->width_max, + radar_info->sidx_min, radar_info->sidx_max); + ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work); + spin_unlock_bh(&ar->data_lock); return; } - ieee80211_radar_detected(ar->hw); +radar_detected: + ath10k_radar_detected(ar); } static int ath10k_dfs_fft_report(struct ath10k *ar, @@ -4128,6 +4216,47 @@ void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb) } } +static int +ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb, + struct wmi_dfs_status_ev_arg *arg) +{ + struct wmi_dfs_status_ev_arg *ev = (void *)skb->data; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + arg->status = ev->status; + + return 0; +} + +static void +ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_dfs_status_ev_arg status_arg = {}; + int ret; + + ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg); + + if (ret) { + ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_REGULATORY, + "dfs status event received from fw: %d\n", + status_arg.status); + + /* Even in case of radar detection failure we follow the same + * behaviour as if radar is detected, i.e. to switch to a different + * channel. + */ + if (status_arg.status == WMI_HW_RADAR_DETECTED || + status_arg.status == WMI_RADAR_DETECTION_FAIL) + ath10k_radar_detected(ar); + complete(&ar->wmi.radar_confirm); +} + void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb) { struct wmi_roam_ev_arg arg = {}; @@ -4357,7 +4486,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar, rate_code[i], type); snprintf(buff, sizeof(buff), "%8d ", tpc[j]); - strncat(tpc_value, buff, strlen(buff)); + strlcat(tpc_value, buff, sizeof(tpc_value)); } tpc_stats->tpc_table[type].pream_idx[i] = pream_idx; tpc_stats->tpc_table[type].rate_code[i] = rate_code[i]; @@ -4479,6 +4608,12 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb) num_tx_chain = __le32_to_cpu(ev->num_tx_chain); + if (num_tx_chain > WMI_TPC_TX_N_CHAIN) { + ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n", + num_tx_chain, WMI_TPC_TX_N_CHAIN); + return; + } + ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table, num_tx_chain); @@ -4694,7 +4829,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar, rate_code[i], type, pream_idx); snprintf(buff, sizeof(buff), "%8d ", tpc[j]); - strncat(tpc_value, buff, strlen(buff)); + strlcat(tpc_value, buff, sizeof(tpc_value)); } tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx; tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i]; @@ -5059,7 +5194,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) return; } - memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map, arg.service_map_len); @@ -5269,6 +5403,21 @@ int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb) return 0; } +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb) +{ + int ret; + struct wmi_svc_avail_ev_arg arg = {}; + + ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse service available event: %d\n", + ret); + } + + ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map, + __le32_to_cpu(arg.service_map_ext_len)); +} + static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb) { const struct wmi_pdev_temperature_event *ev; @@ -5465,6 +5614,9 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_ready(ar, skb); ath10k_wmi_queue_set_coverage_class_work(ar); break; + case WMI_SERVICE_AVAILABLE_EVENTID: + ath10k_wmi_event_service_available(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -5856,6 +6008,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_PDEV_TPC_TABLE_EVENTID: ath10k_wmi_event_tpc_final_table(ar, skb); break; + case WMI_10_4_DFS_STATUS_CHECK_EVENTID: + ath10k_wmi_event_dfs_status_check(ar, skb); + break; default: ath10k_warn(ar, "Unknown eventid: %d\n", id); break; @@ -5880,6 +6035,8 @@ int ath10k_wmi_connect(struct ath10k *ar) struct ath10k_htc_svc_conn_req conn_req; struct ath10k_htc_svc_conn_resp conn_resp; + memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map)); + memset(&conn_req, 0, sizeof(conn_req)); memset(&conn_resp, 0, sizeof(conn_resp)); @@ -7648,7 +7805,7 @@ ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param) cmd->param = __cpu_to_le32(param); ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi pdev get tcp config param:%d\n", param); + "wmi pdev get tpc config param %d\n",
param); return skb; } @@ -7768,7 +7925,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev, len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "HW rate", pdev->data_rc); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", - "Sched self tiggers", pdev->self_triggers); + "Sched self triggers", pdev->self_triggers); len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", "Dropped due to SW retries", pdev->sw_retry_failure); @@ -8440,6 +8597,32 @@ ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, } static struct sk_buff * +ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar, + const struct ath10k_radar_found_info *arg) +{ + struct wmi_radar_found_info *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_radar_found_info *)skb->data; + cmd->pri_min = __cpu_to_le32(arg->pri_min); + cmd->pri_max = __cpu_to_le32(arg->pri_max); + cmd->width_min = __cpu_to_le32(arg->width_min); + cmd->width_max = __cpu_to_le32(arg->width_max); + cmd->sidx_min = __cpu_to_le32(arg->sidx_min); + cmd->sidx_max = __cpu_to_le32(arg->sidx_max); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n", + arg->pri_min, arg->pri_max, arg->width_min, + arg->width_max, arg->sidx_min, arg->sidx_max); + return skb; +} + +static struct sk_buff * ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) { struct wmi_echo_cmd *cmd; @@ -8776,6 +8959,7 @@ static const struct wmi_ops wmi_10_4_ops = { .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, + .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev, .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, @@ -8822,6 +9006,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, .gen_pdev_get_tpc_table_cmdid = ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid, + .gen_radar_found = ath10k_wmi_10_4_gen_radar_found, /* shared with 10.2 */ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, @@ -8884,8 +9069,11 @@ int ath10k_wmi_attach(struct ath10k *ar) init_completion(&ar->wmi.service_ready); init_completion(&ar->wmi.unified_ready); init_completion(&ar->wmi.barrier); + init_completion(&ar->wmi.radar_confirm); INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work); + INIT_WORK(&ar->radar_confirmation_work, + ath10k_radar_confirmation_work); return 0; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 6fbc84c29521..b48db54e9865 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -201,6 +201,8 @@ enum wmi_service { WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, WMI_SERVICE_TPC_STATS_FINAL, + WMI_SERVICE_RESET_CHIP, + WMI_SERVICE_SPOOF_MAC_SUPPORT, /* keep last */ WMI_SERVICE_MAX, @@ -238,6 +240,8 @@ enum wmi_10x_service { WMI_10X_SERVICE_MESH, WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, WMI_10X_SERVICE_PEER_STATS, + WMI_10X_SERVICE_RESET_CHIP, + WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, }; enum wmi_main_service { @@ -548,6 +552,10 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); SVCMAP(WMI_10X_SERVICE_PEER_STATS, WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10X_SERVICE_RESET_CHIP, + WMI_SERVICE_RESET_CHIP, len); 
+ SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -783,6 +791,7 @@ struct wmi_cmd_map { u32 stop_scan_cmdid; u32 scan_chan_list_cmdid; u32 scan_sch_prio_tbl_cmdid; + u32 scan_prob_req_oui_cmdid; u32 pdev_set_regdomain_cmdid; u32 pdev_set_channel_cmdid; u32 pdev_set_param_cmdid; @@ -960,6 +969,7 @@ struct wmi_cmd_map { u32 vdev_sifs_trigger_time_cmdid; u32 pdev_wds_entry_list_cmdid; u32 tdls_set_offchan_mode_cmdid; + u32 radar_found_cmdid; }; /* @@ -1183,6 +1193,7 @@ enum wmi_cmd_id { enum wmi_event_id { WMI_SERVICE_READY_EVENTID = 0x1, WMI_READY_EVENTID, + WMI_SERVICE_AVAILABLE_EVENTID, /* Scan specific events */ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN), @@ -1793,6 +1804,11 @@ enum wmi_10_4_cmd_id { WMI_10_4_TDLS_SET_STATE_CMDID, WMI_10_4_TDLS_PEER_UPDATE_CMDID, WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, + WMI_10_4_PDEV_SEND_FD_CMDID, + WMI_10_4_ENABLE_FILS_CMDID, + WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID, + WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID, + WMI_10_4_RADAR_FOUND_CMDID, WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1868,6 +1884,9 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_TPC_TABLE_EVENTID, WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID, WMI_10_4_TDLS_PEER_EVENTID, + WMI_10_4_HOST_SWFDA_EVENTID, + WMI_10_4_ESP_ESTIMATE_EVENTID, + WMI_10_4_DFS_STATUS_CHECK_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -3159,6 +3178,8 @@ struct wmi_start_scan_arg { u16 channels[64]; struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID]; struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID]; + struct wmi_mac_addr mac_addr; + struct wmi_mac_addr mac_mask; }; /* scan control flags */ @@ -3182,6 +3203,12 @@ struct wmi_start_scan_arg { */ #define WMI_SCAN_CONTINUE_ON_ERROR 0x80 +/* Use random MAC address for TA for Probe Request frame and add + * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame. + * If OUI is not set by WMI_SCAN_PROB_REQ_OUI_CMDID then the flag is ignored.
+ */ +#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000 + /* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */ #define WMI_SCAN_CLASS_MASK 0xFF000000 @@ -3380,6 +3407,25 @@ struct wmi_10_4_phyerr_event { u8 buf[0]; } __packed; +struct wmi_radar_found_info { + __le32 pri_min; + __le32 pri_max; + __le32 width_min; + __le32 width_max; + __le32 sidx_min; + __le32 sidx_max; +} __packed; + +enum wmi_radar_confirmation_status { + /* Detected radar was due to SW pulses */ + WMI_SW_RADAR_DETECTED = 0, + + WMI_RADAR_DETECTION_FAIL = 1, + + /* Real radar detected */ + WMI_HW_RADAR_DETECTED = 2, +}; + #define PHYERR_TLV_SIG 0xBB #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8 @@ -4008,9 +4054,9 @@ struct wmi_pdev_get_tpc_config_cmd { } __packed; #define WMI_TPC_CONFIG_PARAM 1 -#define WMI_TPC_RATE_MAX 160 #define WMI_TPC_FINAL_RATE_MAX 240 #define WMI_TPC_TX_N_CHAIN 4 +#define WMI_TPC_RATE_MAX (WMI_TPC_TX_N_CHAIN * 65) #define WMI_TPC_PREAM_TABLE_MAX 10 #define WMI_TPC_FLAG 3 #define WMI_TPC_BUF_SIZE 10 @@ -6613,6 +6659,10 @@ struct wmi_phyerr_hdr_arg { const void *phyerrs; }; +struct wmi_dfs_status_ev_arg { + u32 status; +}; + struct wmi_svc_rdy_ev_arg { __le32 min_tx_power; __le32 max_tx_power; @@ -6632,6 +6682,11 @@ struct wmi_svc_rdy_ev_arg { const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS]; }; +struct wmi_svc_avail_ev_arg { + __le32 service_map_ext_len; + const __le32 *service_map_ext; +}; + struct wmi_rdy_ev_arg { __le32 sw_version; __le32 abi_version; @@ -6812,6 +6867,10 @@ struct wmi_wow_ev_arg { #define WOW_MIN_PATTERN_SIZE 1 #define WOW_MAX_PATTERN_SIZE 148 #define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) enum wmi_tdls_state { WMI_TDLS_DISABLE, @@ -7052,6 +7111,7 @@ void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb); +void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf, int left_len, struct wmi_phyerr_ev_arg *arg); void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar, diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index c4cbccb29b31..a6b179f88d36 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2015-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -76,6 +77,109 @@ static int ath10k_wow_cleanup(struct ath10k *ar) return 0; } +/** + * Convert a 802.3 format to a 802.11 format. + * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... 
| + * +--+------------+----+-----------+---------------+-----------+ + */ +static void ath10k_wow_convert_8023_to_80211 + (struct cfg80211_pkt_pattern *new, + const struct cfg80211_pkt_pattern *old) +{ + u8 hdr_8023_pattern[ETH_HLEN] = {}; + u8 hdr_8023_bit_mask[ETH_HLEN] = {}; + u8 hdr_80211_pattern[WOW_HDR_LEN] = {}; + u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {}; + + int total_len = old->pkt_offset + old->pattern_len; + int hdr_80211_end_offset; + + struct ieee80211_hdr_3addr *new_hdr_pattern = + (struct ieee80211_hdr_3addr *)hdr_80211_pattern; + struct ieee80211_hdr_3addr *new_hdr_mask = + (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask; + struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern; + struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask; + int hdr_len = sizeof(*new_hdr_pattern); + + struct rfc1042_hdr *new_rfc_pattern = + (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len); + struct rfc1042_hdr *new_rfc_mask = + (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len); + int rfc_len = sizeof(*new_rfc_pattern); + + memcpy(hdr_8023_pattern + old->pkt_offset, + old->pattern, ETH_HLEN - old->pkt_offset); + memcpy(hdr_8023_bit_mask + old->pkt_offset, + old->mask, ETH_HLEN - old->pkt_offset); + + /* Copy destination address */ + memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN); + memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN); + + /* Copy source address */ + memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN); + memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN); + + /* Copy logical link type */ + memcpy(&new_rfc_pattern->snap_type, + &old_hdr_pattern->h_proto, + sizeof(old_hdr_pattern->h_proto)); + memcpy(&new_rfc_mask->snap_type, + &old_hdr_mask->h_proto, + sizeof(old_hdr_mask->h_proto)); + + /* Calculate new pkt_offset */ + if (old->pkt_offset < ETH_ALEN) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr1); + else if (old->pkt_offset < offsetof(struct ethhdr, h_proto)) + new->pkt_offset = old->pkt_offset + + offsetof(struct ieee80211_hdr_3addr, addr3) - + offsetof(struct ethhdr, h_source); + else + new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN; + + /* Calculate new hdr end offset */ + if (total_len > ETH_HLEN) + hdr_80211_end_offset = hdr_len + rfc_len; + else if (total_len > offsetof(struct ethhdr, h_proto)) + hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN; + else if (total_len > ETH_ALEN) + hdr_80211_end_offset = total_len - ETH_ALEN + + offsetof(struct ieee80211_hdr_3addr, addr3); + else + hdr_80211_end_offset = total_len + + offsetof(struct ieee80211_hdr_3addr, addr1); + + new->pattern_len = hdr_80211_end_offset - new->pkt_offset; + + memcpy((u8 *)new->pattern, + hdr_80211_pattern + new->pkt_offset, + new->pattern_len); + memcpy((u8 *)new->mask, + hdr_80211_bit_mask + new->pkt_offset, + new->pattern_len); + + if (total_len > ETH_HLEN) { + /* Copy frame body */ + memcpy((u8 *)new->pattern + new->pattern_len, + (void *)old->pattern + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + memcpy((u8 *)new->mask + new->pattern_len, + (void *)old->mask + ETH_HLEN - old->pkt_offset, + total_len - ETH_HLEN); + + new->pattern_len += total_len - ETH_HLEN; + } +} + static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, struct cfg80211_wowlan *wowlan) { @@ -116,22 +220,40 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif, for (i = 0; i < wowlan->n_patterns; i++) { u8 bitmask[WOW_MAX_PATTERN_SIZE] = {}; + u8
ath_pattern[WOW_MAX_PATTERN_SIZE] = {}; + u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {}; + struct cfg80211_pkt_pattern new_pattern = {}; + struct cfg80211_pkt_pattern old_pattern = patterns[i]; int j; + new_pattern.pattern = ath_pattern; + new_pattern.mask = ath_bitmask; if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE) continue; - /* convert bytemask to bitmask */ for (j = 0; j < patterns[i].pattern_len; j++) if (patterns[i].mask[j / 8] & BIT(j % 8)) bitmask[j] = 0xff; + old_pattern.mask = bitmask; + new_pattern = old_pattern; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + if (patterns[i].pkt_offset < ETH_HLEN) + ath10k_wow_convert_8023_to_80211(&new_pattern, + &old_pattern); + else + new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN; + } + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id, pattern_id, - patterns[i].pattern, - bitmask, - patterns[i].pattern_len, - patterns[i].pkt_offset); + new_pattern.pattern, + new_pattern.mask, + new_pattern.pattern_len, + new_pattern.pkt_offset); if (ret) { ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n", pattern_id, @@ -345,6 +467,12 @@ int ath10k_wow_init(struct ath10k *ar) return -EINVAL; ar->wow.wowlan_support = ath10k_wowlan_support; + + if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index e23d450babd2..0d30e762c090 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -914,7 +914,7 @@ void ath6kl_tx_data_cleanup(struct ath6kl *ar); struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar); void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie); -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev); struct aggr_info *aggr_init(struct ath6kl_vif *vif); void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info, diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c index 0f965e9f38a4..4e94b22eaada 100644 --- a/drivers/net/wireless/ath/ath6kl/debug.c +++ b/drivers/net/wireless/ath/ath6kl/debug.c @@ -645,7 +645,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "CRC Err", tgt_stats->rx_crc_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", - "Key chache miss", tgt_stats->rx_key_cache_miss); + "Key cache miss", tgt_stats->rx_key_cache_miss); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", "Decrypt Err", tgt_stats->rx_decrypt_err); len += scnprintf(buf + len, buf_len - len, "%20s %10llu\n", diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index db95f85751e3..808fb30be9ad 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -426,7 +426,7 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, { u8 *ies = NULL, *wpa_ie = NULL, *pos; size_t ies_len = 0; - struct station_info sinfo; + struct station_info *sinfo; ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); 
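The ath6kl hunk above and below moves struct station_info from the stack to the heap: the structure has grown large, and a big on-stack object risks overflowing the kernel's small fixed-size stack, so the patch allocates it with kzalloc() and frees it after use. A minimal userspace sketch of the same allocate, fill, report, free pattern (struct large_info and report_station() are invented stand-ins for illustration, not ath6kl symbols):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a struct too large to keep on a small stack. */
struct large_info {
	const unsigned char *assoc_req_ies;
	size_t assoc_req_ies_len;
	unsigned char filler[4096]; /* pretend the struct is this big */
};

/* Stand-in for the consumer (cfg80211_new_sta() in the real code). */
static void report_station(const struct large_info *info)
{
	printf("new station, assoc IEs: %zu bytes\n", info->assoc_req_ies_len);
}

int main(void)
{
	static const unsigned char ies[] = { 0xdd, 0x05, 0x00, 0x10, 0x18 };

	/* Zeroed heap allocation -- the userspace analogue of kzalloc(). */
	struct large_info *info = calloc(1, sizeof(*info));
	if (!info)
		return 1; /* bail out on allocation failure, as the patch does */

	info->assoc_req_ies = ies;
	info->assoc_req_ies_len = sizeof(ies);

	report_station(info); /* consumer copies what it needs */

	free(info); /* safe once the consumer has returned */
	return 0;
}

This works because cfg80211_new_sta() does not retain the sinfo pointer after it returns, which is why the patch can kfree() it immediately after the call.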
@@ -482,16 +482,20 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr, keymgmt, ucipher, auth, apsd_info); /* send event to application */ - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return; /* TODO: sinfo.generation */ - sinfo.assoc_req_ies = ies; - sinfo.assoc_req_ies_len = ies_len; + sinfo->assoc_req_ies = ies; + sinfo->assoc_req_ies_len = ies_len; - cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL); + cfg80211_new_sta(vif->ndev, mac_addr, sinfo, GFP_KERNEL); netif_wake_queue(vif->ndev); + + kfree(sinfo); } void disconnect_timer_handler(struct timer_list *t) diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 8da9506f8c2b..618d12ed4b40 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -353,7 +353,7 @@ fail_ctrl_tx: return status; } -int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev) { struct ath6kl *ar = ath6kl_priv(dev); struct ath6kl_cookie *cookie = NULL; diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c index 6fee9a464cce..acb9602aa464 100644 --- a/drivers/net/wireless/ath/ath9k/dfs.c +++ b/drivers/net/wireless/ath/ath9k/dfs.c @@ -41,7 +41,7 @@ static const int BIN_DELTA_MAX = 10; /* we need at least 3 deltas / 4 samples for a reliable chirp detection */ #define NUM_DIFFS 3 -static const int FFT_NUM_SAMPLES = (NUM_DIFFS + 1); +#define FFT_NUM_SAMPLES (NUM_DIFFS + 1) /* Threshold for difference of delta peaks */ static const int MAX_DIFF = 2; @@ -114,7 +114,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n", datalen, num_fft_packets); - if (num_fft_packets < (FFT_NUM_SAMPLES)) { + if (num_fft_packets < FFT_NUM_SAMPLES) { ath_dbg(common, DFS, "not enough packets for chirp\n"); return false; } @@ -136,7 +136,7 @@ static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data, return false; ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n", datalen, num_fft_packets); - if (num_fft_packets < (FFT_NUM_SAMPLES)) { + if (num_fft_packets < FFT_NUM_SAMPLES) { ath_dbg(common, DFS, "not enough packets for chirp\n"); return false; } @@ -277,7 +277,7 @@ ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe) DFS_STAT_INC(sc, pulses_processed); if (pd == NULL) return; - if (!pd->add_pulse(pd, pe)) + if (!pd->add_pulse(pd, pe, NULL)) return; DFS_STAT_INC(sc, radar_detected); ieee80211_radar_detected(sc->hw); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a3be8add56e1..b6663c80e7dd 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2544,7 +2544,8 @@ static void ath9k_unassign_vif_chanctx(struct ieee80211_hw *hw, } static void ath9k_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u16 duration) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 448b83eea810..d52b31b45df7 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -268,7 +268,8 @@ static void dpd_exit(struct dfs_pattern_detector *dpd) } static bool 
-dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) +dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event, + struct radar_detector_specs *rs) { u32 i; struct channel_detector *cd; @@ -294,6 +295,8 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event) struct pri_detector *pd = cd->detectors[i]; struct pri_sequence *ps = pd->add_pulse(pd, event); if (ps != NULL) { + if (rs != NULL) + memcpy(rs, pd->rs, sizeof(*rs)); ath_dbg(dpd->common, DFS, "DFS: radar found on freq=%d: id=%d, pri=%d, " "count=%d, count_false=%d\n", diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.h b/drivers/net/wireless/ath/dfs_pattern_detector.h index 92be3530e9b5..18db6f4f3568 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.h +++ b/drivers/net/wireless/ath/dfs_pattern_detector.h @@ -97,7 +97,8 @@ struct dfs_pattern_detector { bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd, enum nl80211_dfs_regions region); bool (*add_pulse)(struct dfs_pattern_detector *dpd, - struct pulse_event *pe); + struct pulse_event *pe, + struct radar_detector_specs *rs); struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd); enum nl80211_dfs_regions region; diff --git a/drivers/net/wireless/ath/dfs_pri_detector.h b/drivers/net/wireless/ath/dfs_pri_detector.h index 79f0fff4d1e6..86339f2b4d3a 100644 --- a/drivers/net/wireless/ath/dfs_pri_detector.h +++ b/drivers/net/wireless/ath/dfs_pri_detector.h @@ -62,8 +62,9 @@ struct pri_detector { (*add_pulse)(struct pri_detector *de, struct pulse_event *e); void (*reset) (struct pri_detector *de, u64 ts); -/* private: internal use only */ const struct radar_detector_specs *rs; + +/* private: internal use only */ u64 last_ts; struct list_head sequences; struct list_head pulses; diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 5d80be213fac..d73e45e26547 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -68,12 +68,14 @@ enum CountryCode { CTRY_AUSTRALIA = 36, CTRY_AUSTRIA = 40, CTRY_AZERBAIJAN = 31, + CTRY_BAHAMAS = 44, CTRY_BAHRAIN = 48, CTRY_BANGLADESH = 50, CTRY_BARBADOS = 52, CTRY_BELARUS = 112, CTRY_BELGIUM = 56, CTRY_BELIZE = 84, + CTRY_BERMUDA = 60, CTRY_BOLIVIA = 68, CTRY_BOSNIA_HERZ = 70, CTRY_BRAZIL = 76, @@ -136,8 +138,10 @@ enum CountryCode { CTRY_MACEDONIA = 807, CTRY_MALAYSIA = 458, CTRY_MALTA = 470, + CTRY_MAURITIUS = 480, CTRY_MEXICO = 484, CTRY_MONACO = 492, + CTRY_MONTENEGRO = 499, CTRY_MOROCCO = 504, CTRY_NEPAL = 524, CTRY_NETHERLANDS = 528, @@ -159,6 +163,7 @@ enum CountryCode { CTRY_ROMANIA = 642, CTRY_RUSSIA = 643, CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA = 688, CTRY_SERBIA_MONTENEGRO = 891, CTRY_SINGAPORE = 702, CTRY_SLOVAKIA = 703, @@ -170,11 +175,13 @@ enum CountryCode { CTRY_SWITZERLAND = 756, CTRY_SYRIA = 760, CTRY_TAIWAN = 158, + CTRY_TANZANIA = 834, CTRY_THAILAND = 764, CTRY_TRINIDAD_Y_TOBAGO = 780, CTRY_TUNISIA = 788, CTRY_TURKEY = 792, CTRY_UAE = 784, + CTRY_UGANDA = 800, CTRY_UKRAINE = 804, CTRY_UNITED_KINGDOM = 826, CTRY_UNITED_STATES = 840, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index bdd2b4d61f2f..4021e37a225a 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -35,6 +35,7 @@ enum EnumRd { FRANCE_RES = 0x31, FCC3_FCCA = 0x3A, FCC3_WORLD = 0x3B, + FCC3_ETSIC = 0x3F, ETSI1_WORLD = 0x37, ETSI3_ETSIA = 0x32, @@ -44,6 +45,8 @@ enum EnumRd { ETSI4_ETSIC = 0x38, ETSI5_WORLD = 0x39, ETSI6_WORLD = 
0x34, + ETSI8_WORLD = 0x3D, + ETSI9_WORLD = 0x3E, ETSI_RESERVED = 0x33, MKK1_MKKA = 0x40, @@ -59,6 +62,7 @@ enum EnumRd { MKK1_MKKA1 = 0x4A, MKK1_MKKA2 = 0x4B, MKK1_MKKC = 0x4C, + APL2_FCCA = 0x4D, APL3_FCCA = 0x50, APL1_WORLD = 0x52, @@ -67,6 +71,7 @@ enum EnumRd { APL1_ETSIC = 0x55, APL2_ETSIC = 0x56, APL5_WORLD = 0x58, + APL13_WORLD = 0x5A, APL6_WORLD = 0x5B, APL7_FCCA = 0x5C, APL8_WORLD = 0x5D, @@ -168,6 +173,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, {FCC3_FCCA, CTL_FCC, CTL_FCC}, {FCC3_WORLD, CTL_FCC, CTL_ETSI}, + {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, {FCC4_FCCA, CTL_FCC, CTL_FCC}, {FCC5_FCCA, CTL_FCC, CTL_FCC}, {FCC6_FCCA, CTL_FCC, CTL_FCC}, @@ -179,6 +185,8 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI9_WORLD, CTL_ETSI, CTL_ETSI}, /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, @@ -188,9 +196,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {FCC1_FCCA, CTL_FCC, CTL_FCC}, {APL1_WORLD, CTL_FCC, CTL_ETSI}, {APL2_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_FCCA, CTL_FCC, CTL_FCC}, {APL3_WORLD, CTL_FCC, CTL_ETSI}, {APL4_WORLD, CTL_FCC, CTL_ETSI}, {APL5_WORLD, CTL_FCC, CTL_ETSI}, + {APL13_WORLD, CTL_ETSI, CTL_ETSI}, {APL6_WORLD, CTL_ETSI, CTL_ETSI}, {APL8_WORLD, CTL_ETSI, CTL_ETSI}, {APL9_WORLD, CTL_ETSI, CTL_ETSI}, @@ -289,37 +299,39 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { static struct country_code_to_enum_rd allCountries[] = { {CTRY_DEBUG, NO_ENUMRD, "DB"}, {CTRY_DEFAULT, FCC1_FCCA, "CO"}, - {CTRY_ALBANIA, NULL1_WORLD, "AL"}, - {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, + {CTRY_ALBANIA, ETSI1_WORLD, "AL"}, + {CTRY_ALGERIA, APL13_WORLD, "DZ"}, {CTRY_ARGENTINA, FCC3_WORLD, "AR"}, {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, {CTRY_ARUBA, ETSI1_WORLD, "AW"}, - {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, + {CTRY_AUSTRALIA, FCC3_WORLD, "AU"}, {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, + {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, {CTRY_BAHRAIN, APL6_WORLD, "BH"}, - {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, + {CTRY_BANGLADESH, APL1_WORLD, "BD"}, {CTRY_BARBADOS, FCC2_WORLD, "BB"}, {CTRY_BELARUS, ETSI1_WORLD, "BY"}, {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, {CTRY_BELIZE, APL1_ETSIC, "BZ"}, + {CTRY_BERMUDA, FCC3_FCCA, "BM"}, {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, {CTRY_BRAZIL, FCC3_WORLD, "BR"}, - {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, - {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, + {CTRY_BRUNEI_DARUSSALAM, APL6_WORLD, "BN"}, + {CTRY_BULGARIA, ETSI1_WORLD, "BG"}, {CTRY_CAMBODIA, ETSI1_WORLD, "KH"}, {CTRY_CANADA, FCC3_FCCA, "CA"}, {CTRY_CANADA2, FCC6_FCCA, "CA"}, {CTRY_CHILE, APL6_WORLD, "CL"}, {CTRY_CHINA, APL1_WORLD, "CN"}, - {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, + {CTRY_COLOMBIA, FCC3_WORLD, "CO"}, {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, {CTRY_CROATIA, ETSI1_WORLD, "HR"}, {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, - {CTRY_CZECH, ETSI3_WORLD, "CZ"}, + {CTRY_CZECH, ETSI1_WORLD, "CZ"}, {CTRY_DENMARK, ETSI1_WORLD, "DK"}, {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO"}, {CTRY_ECUADOR, FCC1_WORLD, "EC"}, @@ -336,7 +348,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_GUAM, FCC1_FCCA, "GU"}, {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, {CTRY_HAITI, ETSI1_WORLD, "HT"}, - {CTRY_HONDURAS, NULL1_WORLD, "HN"}, + {CTRY_HONDURAS, FCC3_WORLD, "HN"}, 
{CTRY_HONG_KONG, FCC3_WORLD, "HK"}, {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, {CTRY_ICELAND, ETSI1_WORLD, "IS"}, @@ -344,7 +356,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_INDONESIA, NULL1_WORLD, "ID"}, {CTRY_IRAN, APL1_WORLD, "IR"}, {CTRY_IRELAND, ETSI1_WORLD, "IE"}, - {CTRY_ISRAEL, NULL1_WORLD, "IL"}, + {CTRY_ISRAEL, ETSI3_WORLD, "IL"}, {CTRY_ITALY, ETSI1_WORLD, "IT"}, {CTRY_JAMAICA, FCC3_WORLD, "JM"}, @@ -409,6 +421,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_JORDAN, ETSI2_WORLD, "JO"}, {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ"}, + {CTRY_KENYA, APL1_WORLD, "KE"}, {CTRY_KOREA_NORTH, APL9_WORLD, "KP"}, {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, @@ -420,32 +433,37 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_LITHUANIA, ETSI1_WORLD, "LT"}, {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU"}, {CTRY_MACAU, FCC2_WORLD, "MO"}, - {CTRY_MACEDONIA, NULL1_WORLD, "MK"}, - {CTRY_MALAYSIA, APL8_WORLD, "MY"}, + {CTRY_MACEDONIA, ETSI1_WORLD, "MK"}, + {CTRY_MALAYSIA, FCC1_WORLD, "MY"}, {CTRY_MALTA, ETSI1_WORLD, "MT"}, + {CTRY_MAURITIUS, ETSI1_WORLD, "MU"}, {CTRY_MEXICO, FCC1_FCCA, "MX"}, {CTRY_MONACO, ETSI4_WORLD, "MC"}, + {CTRY_MONTENEGRO, ETSI1_WORLD, "ME"}, {CTRY_MOROCCO, APL4_WORLD, "MA"}, {CTRY_NEPAL, APL1_WORLD, "NP"}, {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, - {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, + {CTRY_NEW_ZEALAND, FCC3_ETSIC, "NZ"}, + {CTRY_NICARAGUA, FCC3_FCCA, "NI"}, {CTRY_NORWAY, ETSI1_WORLD, "NO"}, {CTRY_OMAN, FCC3_WORLD, "OM"}, {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, {CTRY_PANAMA, FCC1_FCCA, "PA"}, {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, - {CTRY_PERU, APL1_WORLD, "PE"}, - {CTRY_PHILIPPINES, APL1_WORLD, "PH"}, + {CTRY_PARAGUAY, FCC3_WORLD, "PY"}, + {CTRY_PERU, FCC3_WORLD, "PE"}, + {CTRY_PHILIPPINES, FCC3_WORLD, "PH"}, {CTRY_POLAND, ETSI1_WORLD, "PL"}, {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, {CTRY_QATAR, APL1_WORLD, "QA"}, - {CTRY_ROMANIA, NULL1_WORLD, "RO"}, - {CTRY_RUSSIA, NULL1_WORLD, "RU"}, + {CTRY_ROMANIA, ETSI1_WORLD, "RO"}, + {CTRY_RUSSIA, ETSI8_WORLD, "RU"}, {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, + {CTRY_SERBIA, ETSI1_WORLD, "RS"}, {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, - {CTRY_SINGAPORE, APL6_WORLD, "SG"}, + {CTRY_SINGAPORE, FCC3_WORLD, "SG"}, {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, {CTRY_SLOVENIA, ETSI1_WORLD, "SI"}, {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA"}, @@ -455,11 +473,13 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, {CTRY_SYRIA, NULL1_WORLD, "SY"}, {CTRY_TAIWAN, APL3_FCCA, "TW"}, + {CTRY_TANZANIA, APL1_WORLD, "TZ"}, {CTRY_THAILAND, FCC3_WORLD, "TH"}, {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, {CTRY_TURKEY, ETSI3_WORLD, "TR"}, - {CTRY_UKRAINE, NULL1_WORLD, "UA"}, + {CTRY_UGANDA, FCC3_WORLD, "UG"}, + {CTRY_UKRAINE, ETSI9_WORLD, "UA"}, {CTRY_UAE, NULL1_WORLD, "AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, {CTRY_UNITED_STATES, FCC3_FCCA, "US"}, @@ -472,7 +492,7 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, {CTRY_YEMEN, NULL1_WORLD, "YE"}, - {CTRY_ZIMBABWE, NULL1_WORLD, "ZW"}, + {CTRY_ZIMBABWE, ETSI1_WORLD, "ZW"}, }; #endif diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile index 3b09435104eb..582049f65735 100644 --- a/drivers/net/wireless/ath/wcn36xx/Makefile +++ b/drivers/net/wireless/ath/wcn36xx/Makefile @@ 
-6,3 +6,5 @@ wcn36xx-y += main.o \ smd.o \ pmc.o \ debug.o + +wcn36xx-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 2c3b899a88fa..06cfe8d311f3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch) if (!cur_ctl) goto out_fail; - spin_lock_init(&cur_ctl->skb_lock); cur_ctl->ctl_blk_order = i; if (i == 0) { ch->head_blk_ctl = cur_ctl; @@ -275,12 +274,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch) return 0; } -static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl) +static int wcn36xx_dxe_fill_skb(struct device *dev, + struct wcn36xx_dxe_ctl *ctl, + gfp_t gfp) { struct wcn36xx_dxe_desc *dxe = ctl->desc; struct sk_buff *skb; - skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC); + skb = alloc_skb(WCN36XX_PKT_SIZE, gfp); if (skb == NULL) return -ENOMEM; @@ -307,7 +308,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn, cur_ctl = wcn_ch->head_blk_ctl; for (i = 0; i < wcn_ch->desc_num; i++) { - wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl); + wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL); cur_ctl = cur_ctl->next; } @@ -367,9 +368,11 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) spin_lock_irqsave(&ch->lock, flags); ctl = ch->tail_blk_ctl; do { - if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD) + if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD) break; - if (ctl->skb) { + + if (ctl->skb && + READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) { dma_unmap_single(wcn->dev, ctl->desc->src_addr_l, ctl->skb->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(ctl->skb); @@ -377,18 +380,16 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch) /* Keep frame until TX status comes */ ieee80211_free_txskb(wcn->hw, ctl->skb); } - spin_lock(&ctl->skb_lock); + if (wcn->queues_stopped) { wcn->queues_stopped = false; ieee80211_wake_queues(wcn->hw); } - spin_unlock(&ctl->skb_lock); ctl->skb = NULL; } ctl = ctl->next; - } while (ctl != ch->head_blk_ctl && - !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)); + } while (ctl != ch->head_blk_ctl); ch->tail_blk_ctl = ctl; spin_unlock_irqrestore(&ch->lock, flags); @@ -431,8 +432,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_INT_MASK_CHAN_TX_H); } - wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n"); - reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n", + int_reason); + + if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK)) + reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch); } if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) { @@ -466,8 +471,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev) WCN36XX_INT_MASK_CHAN_TX_L); } - wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n"); - reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); + wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n", + int_reason); + + if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK)) + reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch); } return IRQ_HANDLED; @@ -477,9 +486,8 @@ static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev) { struct wcn36xx *wcn = (struct wcn36xx *)dev; - disable_irq_nosync(wcn->rx_irq); wcn36xx_dxe_rx_frame(wcn); - enable_irq(wcn->rx_irq); + return IRQ_HANDLED; } @@ -513,27 +521,53 @@ out_err: } static int wcn36xx_rx_handle_packets(struct 
wcn36xx *wcn, - struct wcn36xx_dxe_ch *ch) + struct wcn36xx_dxe_ch *ch, + u32 ctrl, + u32 en_mask, + u32 int_mask, + u32 status_reg) { - struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl; - struct wcn36xx_dxe_desc *dxe = ctl->desc; + struct wcn36xx_dxe_desc *dxe; + struct wcn36xx_dxe_ctl *ctl; dma_addr_t dma_addr; struct sk_buff *skb; - int ret = 0, int_mask; - u32 value; + u32 int_reason; + int ret; - if (ch->ch_type == WCN36XX_DXE_CH_RX_L) { - value = WCN36XX_DXE_CTRL_RX_L; - int_mask = WCN36XX_DXE_INT_CH1_MASK; - } else { - value = WCN36XX_DXE_CTRL_RX_H; - int_mask = WCN36XX_DXE_INT_CH3_MASK; + wcn36xx_dxe_read_register(wcn, status_reg, &int_reason); + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask); + + if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) { + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ERR_CLR, + int_mask); + + wcn36xx_err("DXE IRQ reported error on RX channel\n"); } - while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) { + if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_DONE_CLR, + int_mask); + + if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) + wcn36xx_dxe_write_register(wcn, + WCN36XX_DXE_0_INT_ED_CLR, + int_mask); + + if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK | + WCN36XX_CH_STAT_INT_ED_MASK))) + return 0; + + spin_lock(&ch->lock); + + ctl = ch->head_blk_ctl; + dxe = ctl->desc; + + while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) { skb = ctl->skb; dma_addr = dxe->dst_addr_l; - ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl); + ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC); if (0 == ret) { /* new skb allocation ok. Use the new one and queue * the old one to network system. @@ -543,13 +577,16 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn, wcn36xx_rx_skb(wcn, skb); } /* else keep old skb not submitted and use it for rx DMA */ - dxe->ctrl = value; + dxe->ctrl = ctrl; ctl = ctl->next; dxe = ctl->desc; } - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask); + wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask); ch->head_blk_ctl = ctl; + + spin_unlock(&ch->lock); + return 0; } @@ -560,19 +597,20 @@ void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn) wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src); /* RX_LOW_PRI */ - if (int_src & WCN36XX_DXE_INT_CH1_MASK) { - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, - WCN36XX_DXE_INT_CH1_MASK); - wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch)); - } + if (int_src & WCN36XX_DXE_INT_CH1_MASK) + wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch, + WCN36XX_DXE_CTRL_RX_L, + WCN36XX_DXE_INT_CH1_MASK, + WCN36XX_INT_MASK_CHAN_RX_L, + WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L); /* RX_HIGH_PRI */ - if (int_src & WCN36XX_DXE_INT_CH3_MASK) { - /* Clean up all the INT within this channel */ - wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, - WCN36XX_DXE_INT_CH3_MASK); - wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch)); - } + if (int_src & WCN36XX_DXE_INT_CH3_MASK) + wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch, + WCN36XX_DXE_CTRL_RX_H, + WCN36XX_DXE_INT_CH3_MASK, + WCN36XX_INT_MASK_CHAN_RX_H, + WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H); if (!int_src) wcn36xx_warn("No DXE interrupt pending\n"); @@ -643,8 +681,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, struct sk_buff *skb, bool is_low) { - struct wcn36xx_dxe_ctl *ctl = NULL; - struct wcn36xx_dxe_desc *desc = NULL; + struct wcn36xx_dxe_desc *desc_bd, *desc_skb; + struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb; struct wcn36xx_dxe_ch *ch = NULL; unsigned 
long flags; int ret; @@ -652,73 +690,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn, ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch; spin_lock_irqsave(&ch->lock, flags); - ctl = ch->head_blk_ctl; - - spin_lock(&ctl->next->skb_lock); + ctl_bd = ch->head_blk_ctl; + ctl_skb = ctl_bd->next; /* * If skb is not null that means that we reached the tail of the ring * hence ring is full. Stop queues to let mac80211 back off until ring * has an empty slot again. */ - if (NULL != ctl->next->skb) { + if (NULL != ctl_skb->skb) { ieee80211_stop_queues(wcn->hw); wcn->queues_stopped = true; - spin_unlock(&ctl->next->skb_lock); spin_unlock_irqrestore(&ch->lock, flags); return -EBUSY; } - spin_unlock(&ctl->next->skb_lock); - ctl->skb = NULL; - desc = ctl->desc; + if (unlikely(ctl_skb->bd_cpu_addr)) { + wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n"); + ret = -EINVAL; + goto unlock; + } + + desc_bd = ctl_bd->desc; + desc_skb = ctl_skb->desc; + + ctl_bd->skb = NULL; /* write buffer descriptor */ - memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd)); + memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd)); /* Set source address of the BD we send */ - desc->src_addr_l = ctl->bd_phy_addr; - - desc->dst_addr_l = ch->dxe_wq; - desc->fr_len = sizeof(struct wcn36xx_tx_bd); - desc->ctrl = ch->ctrl_bd; + desc_bd->src_addr_l = ctl_bd->bd_phy_addr; + desc_bd->dst_addr_l = ch->dxe_wq; + desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd); wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n"); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ", - (char *)desc, sizeof(*desc)); + (char *)desc_bd, sizeof(*desc_bd)); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, - "BD >>> ", (char *)ctl->bd_cpu_addr, + "BD >>> ", (char *)ctl_bd->bd_cpu_addr, sizeof(struct wcn36xx_tx_bd)); - /* Set source address of the SKB we send */ - ctl = ctl->next; - ctl->skb = skb; - desc = ctl->desc; - if (ctl->bd_cpu_addr) { - wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n"); - ret = -EINVAL; + desc_skb->src_addr_l = dma_map_single(wcn->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) { + dev_err(wcn->dev, "unable to DMA map src_addr_l\n"); + ret = -ENOMEM; goto unlock; } - desc->src_addr_l = dma_map_single(wcn->dev, - ctl->skb->data, - ctl->skb->len, - DMA_TO_DEVICE); - - desc->dst_addr_l = ch->dxe_wq; - desc->fr_len = ctl->skb->len; - - /* set dxe descriptor to VALID */ - desc->ctrl = ch->ctrl_skb; + ctl_skb->skb = skb; + desc_skb->dst_addr_l = ch->dxe_wq; + desc_skb->fr_len = ctl_skb->skb->len; wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ", - (char *)desc, sizeof(*desc)); + (char *)desc_skb, sizeof(*desc_skb)); wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ", - (char *)ctl->skb->data, ctl->skb->len); + (char *)ctl_skb->skb->data, ctl_skb->skb->len); /* Move the head of the ring to the next empty descriptor */ - ch->head_blk_ctl = ctl->next; + ch->head_blk_ctl = ctl_skb->next; + + /* Commit all previous writes and set descriptors to VALID */ + wmb(); + desc_skb->ctrl = ch->ctrl_skb; + wmb(); + desc_bd->ctrl = ch->ctrl_bd; /* * When connected and trying to send data frame chip can be in sleep diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index ce580960d109..31b81b7547a3 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -422,7 +422,6 @@ struct wcn36xx_dxe_ctl { unsigned int desc_phy_addr; int ctl_blk_order; struct sk_buff *skb; - spinlock_t skb_lock; void *bd_cpu_addr; dma_addr_t bd_phy_addr; }; diff --git 
a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 182963522941..8abda2760e04 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -88,6 +88,12 @@ /* version string max length (including NULL) */ #define WCN36XX_HAL_VERSION_LENGTH 64 +/* How many frames until we start a-mpdu TX session */ +#define WCN36XX_AMPDU_START_THRESH 20 + +#define WCN36XX_MAX_SCAN_SSIDS 9 +#define WCN36XX_MAX_SCAN_IE_LEN 500 + /* message types for messages exchanged between WDI and HAL */ enum wcn36xx_hal_host_msg_type { /* Init/De-Init */ @@ -1170,7 +1176,7 @@ struct wcn36xx_hal_start_scan_offload_req_msg { /* IE field */ u16 ie_len; - u8 ie[0]; + u8 ie[WCN36XX_MAX_SCAN_IE_LEN]; } __packed; struct wcn36xx_hal_start_scan_offload_rsp_msg { @@ -2230,6 +2236,22 @@ struct wcn36xx_hal_switch_channel_rsp_msg { } __packed; +struct wcn36xx_hal_process_ptt_msg_req_msg { + struct wcn36xx_hal_msg_header header; + + /* Actual FTM Command body */ + u8 ptt_msg[0]; +} __packed; + +struct wcn36xx_hal_process_ptt_msg_rsp_msg { + struct wcn36xx_hal_msg_header header; + + /* FTM Command response status */ + u32 ptt_msg_resp_status; + /* Actual FTM Command body */ + u8 ptt_msg[0]; +} __packed; + struct update_edca_params_req_msg { struct wcn36xx_hal_msg_header header; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 69d6be59d97f..aeb5e6e806be 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -26,6 +26,7 @@ #include <linux/soc/qcom/smem_state.h> #include <linux/soc/qcom/wcnss_ctrl.h> #include "wcn36xx.h" +#include "testmode.h" unsigned int wcn36xx_dbg_mask; module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644); @@ -353,6 +354,19 @@ static void wcn36xx_stop(struct ieee80211_hw *hw) wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n"); + cancel_work_sync(&wcn->scan_work); + + mutex_lock(&wcn->scan_lock); + if (wcn->scan_req) { + struct cfg80211_scan_info scan_info = { + .aborted = true, + }; + + ieee80211_scan_completed(wcn->hw, &scan_info); + } + wcn->scan_req = NULL; + mutex_unlock(&wcn->scan_lock); + wcn36xx_debugfs_exit(wcn); wcn36xx_smd_stop(wcn); wcn36xx_dxe_deinit(wcn); @@ -549,6 +563,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } else { wcn36xx_smd_set_bsskey(wcn, vif_priv->encrypt_type, + vif_priv->bss_index, key_conf->keyidx, key_conf->keylen, key); @@ -566,10 +581,13 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case DISABLE_KEY: if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) { + if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) + wcn36xx_smd_remove_bsskey(wcn, + vif_priv->encrypt_type, + vif_priv->bss_index, + key_conf->keyidx); + vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; - wcn36xx_smd_remove_bsskey(wcn, - vif_priv->encrypt_type, - key_conf->keyidx); } else { sta_priv->is_data_encrypted = false; /* do not remove key if disassociated */ @@ -670,10 +688,18 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw, wcn->scan_aborted = true; mutex_unlock(&wcn->scan_lock); - /* ieee80211_scan_completed will be called on FW scan indication */ - wcn36xx_smd_stop_hw_scan(wcn); - - cancel_work_sync(&wcn->scan_work); + if (get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) { + /* ieee80211_scan_completed will be called on FW scan + * indication */ + wcn36xx_smd_stop_hw_scan(wcn); + } else { + struct cfg80211_scan_info scan_info = { + .aborted = true, + }; + + 
cancel_work_sync(&wcn->scan_work); + ieee80211_scan_completed(wcn->hw, &scan_info); + } } static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta, @@ -773,6 +799,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, if (!is_zero_ether_addr(bss_conf->bssid)) { vif_priv->is_joining = true; vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; + wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, + WCN36XX_HAL_LINK_PREASSOC_STATE); wcn36xx_smd_join(wcn, bss_conf->bssid, vif->addr, WCN36XX_HW_CHANNEL(wcn)); wcn36xx_smd_config_bss(wcn, vif, NULL, @@ -780,6 +808,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } else { vif_priv->is_joining = false; wcn36xx_smd_delete_bss(wcn, vif); + wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr, + WCN36XX_HAL_LINK_IDLE_STATE); vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE; } } @@ -953,6 +983,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, mutex_lock(&wcn->conf_mutex); + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); @@ -1116,6 +1147,8 @@ static const struct ieee80211_ops wcn36xx_ops = { .sta_add = wcn36xx_sta_add, .sta_remove = wcn36xx_sta_remove, .ampdu_action = wcn36xx_ampdu_action, + + CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd) }; static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) @@ -1283,6 +1316,12 @@ static int wcn36xx_probe(struct platform_device *pdev) mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); + ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32)); + if (ret < 0) { + wcn36xx_err("failed to set DMA mask: %d\n", ret); + goto out_wq; + } + INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker); wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw); diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 8932af5e4d8d..b4dadf75d565 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -252,23 +252,29 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len) { int ret = 0; unsigned long start; + struct wcn36xx_hal_msg_header *hdr = + (struct wcn36xx_hal_msg_header *)wcn->hal_buf; + u16 req_type = hdr->msg_type; + wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len); init_completion(&wcn->hal_rsp_compl); start = jiffies; ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len); if (ret) { - wcn36xx_err("HAL TX failed\n"); + wcn36xx_err("HAL TX failed for req %d\n", req_type); goto out; } if (wait_for_completion_timeout(&wcn->hal_rsp_compl, msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) { - wcn36xx_err("Timeout! No SMD response in %dms\n", - HAL_MSG_TIMEOUT); + wcn36xx_err("Timeout! 
No SMD response to req %d in %dms\n", + req_type, HAL_MSG_TIMEOUT); ret = -ETIME; goto out; } - wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms", + wcn36xx_dbg(WCN36XX_DBG_SMD, + "SMD command (req %d, rsp %d) completed in %dms\n", + req_type, hdr->msg_type, jiffies_to_msecs(jiffies - start)); out: return ret; @@ -292,12 +298,26 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr, msg_body.header.len = sizeof(msg_body); \ } while (0) \ +#define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \ + do { \ + memset(p_msg_body, 0, sizeof(*p_msg_body) + ppt_msg_len); \ + p_msg_body->header.msg_type = WCN36XX_HAL_PROCESS_PTT_REQ; \ + p_msg_body->header.msg_version = WCN36XX_HAL_MSG_VERSION0; \ + p_msg_body->header.len = sizeof(*p_msg_body) + ppt_msg_len; \ + } while (0) + #define PREPARE_HAL_BUF(send_buf, msg_body) \ do { \ memset(send_buf, 0, msg_body.header.len); \ memcpy(send_buf, &msg_body, sizeof(msg_body)); \ } while (0) \ +#define PREPARE_HAL_PTT_MSG_BUF(send_buf, p_msg_body) \ + do { \ + memset(send_buf, 0, p_msg_body->header.len); \ + memcpy(send_buf, p_msg_body, p_msg_body->header.len); \ + } while (0) + static int wcn36xx_smd_rsp_status_check(void *buf, size_t len) { struct wcn36xx_fw_msg_status_rsp *rsp; @@ -620,9 +640,13 @@ out: int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct cfg80211_scan_request *req) { + struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); struct wcn36xx_hal_start_scan_offload_req_msg msg_body; int ret, i; + if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN) + return -EINVAL; + mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ); @@ -631,6 +655,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, msg_body.max_ch_time = 100; msg_body.scan_hidden = 1; memcpy(msg_body.mac, vif->addr, ETH_ALEN); + msg_body.bss_type = vif_priv->bss_type; msg_body.p2p_search = vif->p2p; msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids)); @@ -646,6 +671,14 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif, for (i = 0; i < msg_body.num_channel; i++) msg_body.channels[i] = req->channels[i]->hw_value; + msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN; + + if (req->ie_len > 0) { + msg_body.ie_len = req->ie_len; + msg_body.header.len += req->ie_len; + memcpy(msg_body.ie, req->ie, req->ie_len); + } + PREPARE_HAL_BUF(wcn->hal_buf, msg_body); wcn36xx_dbg(WCN36XX_DBG_HAL, @@ -741,6 +774,71 @@ out: return ret; } +static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len, + void **p_ptt_rsp_msg) +{ + struct wcn36xx_hal_process_ptt_msg_rsp_msg *rsp; + int ret; + + ret = wcn36xx_smd_rsp_status_check(buf, len); + if (ret) + return ret; + + rsp = (struct wcn36xx_hal_process_ptt_msg_rsp_msg *)buf; + + wcn36xx_dbg(WCN36XX_DBG_HAL, "process ptt msg responded with length %d\n", + rsp->header.len); + wcn36xx_dbg_dump(WCN36XX_DBG_HAL_DUMP, "HAL_PTT_MSG_RSP:", rsp->ptt_msg, + rsp->header.len - sizeof(rsp->ptt_msg_resp_status)); + + if (rsp->header.len > 0) { + *p_ptt_rsp_msg = kmalloc(rsp->header.len, GFP_ATOMIC); + if (!*p_ptt_rsp_msg) + return -ENOMEM; + memcpy(*p_ptt_rsp_msg, rsp->ptt_msg, rsp->header.len); + } + return ret; +} + +int wcn36xx_smd_process_ptt_msg(struct wcn36xx *wcn, + struct ieee80211_vif *vif, void *ptt_msg, size_t len, + void **ptt_rsp_msg) +{ + struct wcn36xx_hal_process_ptt_msg_req_msg *p_msg_body; + int ret; + + mutex_lock(&wcn->hal_mutex); + p_msg_body = kmalloc( + sizeof(struct wcn36xx_hal_process_ptt_msg_req_msg) 
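
The start_hw_scan hunk above replaces the old zero-length ie[0] tail with a fixed WCN36XX_MAX_SCAN_IE_LEN buffer, then shrinks header.len back down to the bytes actually used. A standalone sketch of that wire-length accounting, with invented struct names (the driver arrives at the same value via INIT_HAL_MSG followed by the -=/+= adjustments):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SCAN_IE_LEN 500

struct hal_hdr { uint16_t msg_type; uint16_t len; };

struct scan_req_msg {
        struct hal_hdr header;
        uint16_t ie_len;
        uint8_t ie[MAX_SCAN_IE_LEN];    /* fixed max-size tail */
};

/* Returns how many bytes of the message actually go on the wire. */
static uint16_t scan_msg_wire_len(struct scan_req_msg *msg,
                                  const uint8_t *ie, uint16_t ie_len)
{
        /* Start from the struct size, drop the unused tail, then add back
         * only the IE bytes really present. */
        msg->header.len = sizeof(*msg) - MAX_SCAN_IE_LEN;
        if (ie_len) {
                msg->ie_len = ie_len;
                msg->header.len += ie_len;
                memcpy(msg->ie, ie, ie_len);
        }
        return msg->header.len;
}

int main(void)
{
        struct scan_req_msg msg = { .header = { .msg_type = 1 } };
        uint8_t ie[3] = { 0xdd, 0x01, 0x42 };

        printf("no IE: %u bytes\n", scan_msg_wire_len(&msg, NULL, 0));
        printf("3-byte IE: %u bytes\n", scan_msg_wire_len(&msg, ie, 3));
        return 0;
}
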
+ len, + GFP_ATOMIC); + if (!p_msg_body) { + ret = -ENOMEM; + goto out_nomem; + } + INIT_HAL_PTT_MSG(p_msg_body, len); + + memcpy(&p_msg_body->ptt_msg, ptt_msg, len); + + PREPARE_HAL_PTT_MSG_BUF(wcn->hal_buf, p_msg_body); + + ret = wcn36xx_smd_send_and_wait(wcn, p_msg_body->header.len); + if (ret) { + wcn36xx_err("Sending hal_process_ptt_msg failed\n"); + goto out; + } + ret = wcn36xx_smd_process_ptt_msg_rsp(wcn->hal_buf, wcn->hal_rsp_len, + ptt_rsp_msg); + if (ret) { + wcn36xx_err("process_ptt_msg response failed err=%d\n", ret); + goto out; + } +out: + kfree(p_msg_body); +out_nomem: + mutex_unlock(&wcn->hal_mutex); + return ret; +} + static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) { struct wcn36xx_hal_update_scan_params_resp *rsp; @@ -1399,9 +1497,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif, bss->spectrum_mgt_enable = 0; bss->tx_mgmt_power = 0; bss->max_tx_power = WCN36XX_MAX_POWER(wcn); - bss->action = update; + vif_priv->bss_type = bss->bss_type; + wcn36xx_dbg(WCN36XX_DBG_HAL, "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n", bss->bssid, bss->self_mac_addr, bss->bss_type, @@ -1446,6 +1545,10 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) int ret = 0; mutex_lock(&wcn->hal_mutex); + + if (vif_priv->bss_index == WCN36XX_HAL_BSS_INVALID_IDX) + goto out; + INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ); msg_body.bss_index = vif_priv->bss_index; @@ -1464,6 +1567,8 @@ int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif) wcn36xx_err("hal_delete_bss response failed err=%d\n", ret); goto out; } + + vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX; out: mutex_unlock(&wcn->hal_mutex); return ret; @@ -1630,6 +1735,7 @@ out: int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx, u8 keylen, u8 *key) @@ -1639,7 +1745,7 @@ int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ); - msg_body.bss_idx = 0; + msg_body.bss_idx = bssidx; msg_body.enc_type = enc_type; msg_body.num_keys = 1; msg_body.keys[0].id = keyidx; @@ -1700,6 +1806,7 @@ out: int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx) { struct wcn36xx_hal_remove_bss_key_req_msg msg_body; @@ -1707,7 +1814,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ); - msg_body.bss_idx = 0; + msg_body.bss_idx = bssidx; msg_body.enc_type = enc_type; msg_body.key_id = keyidx; @@ -2132,11 +2239,13 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) return -EIO; } - wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)", rsp->type); + wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type); switch (rsp->type) { case WCN36XX_HAL_SCAN_IND_FAILED: + case WCN36XX_HAL_SCAN_IND_DEQUEUED: scan_info.aborted = true; + /* fall through */ case WCN36XX_HAL_SCAN_IND_COMPLETED: mutex_lock(&wcn->scan_lock); wcn->scan_req = NULL; @@ -2147,7 +2256,6 @@ static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len) break; case WCN36XX_HAL_SCAN_IND_STARTED: case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL: - case WCN36XX_HAL_SCAN_IND_DEQUEUED: case WCN36XX_HAL_SCAN_IND_PREEMPTED: case WCN36XX_HAL_SCAN_IND_RESTARTED: break; @@ -2367,6 +2475,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, case WCN36XX_HAL_JOIN_RSP: case 
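
The wcn36xx_smd_delete_bss() hunks above make teardown idempotent: bail out early when bss_index is still the invalid sentinel, and reset it to the sentinel once the firmware delete succeeds. A minimal sketch of that lifecycle, with invented names and the firmware call stubbed out:

#include <stdio.h>

#define BSS_INVALID_IDX 0xff

struct vif { unsigned char bss_index; };

static int delete_bss(struct vif *v)
{
        /* Nothing was ever configured (or it is already gone): succeed
         * without talking to the firmware. */
        if (v->bss_index == BSS_INVALID_IDX) {
                printf("delete_bss: nothing to do\n");
                return 0;
        }
        printf("delete_bss: removing index %u\n", v->bss_index);
        /* ...firmware request would go here... */
        v->bss_index = BSS_INVALID_IDX; /* mark gone so a retry is a no-op */
        return 0;
}

int main(void)
{
        struct vif v = { .bss_index = 2 };

        delete_bss(&v);
        delete_bss(&v); /* second call short-circuits */
        return 0;
}
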
WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP: case WCN36XX_HAL_CH_SWITCH_RSP: + case WCN36XX_HAL_PROCESS_PTT_RSP: case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP: case WCN36XX_HAL_8023_MULTICAST_LIST_RSP: case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP: @@ -2407,6 +2516,7 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev, return 0; } + static void wcn36xx_ind_smd_work(struct work_struct *work) { struct wcn36xx *wcn = @@ -2471,24 +2581,24 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) } int wcn36xx_smd_open(struct wcn36xx *wcn) { - int ret = 0; wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind"); - if (!wcn->hal_ind_wq) { - wcn36xx_err("failed to allocate wq\n"); - ret = -ENOMEM; - goto out; - } + if (!wcn->hal_ind_wq) + return -ENOMEM; + INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work); INIT_LIST_HEAD(&wcn->hal_ind_queue); spin_lock_init(&wcn->hal_ind_lock); return 0; - -out: - return ret; } void wcn36xx_smd_close(struct wcn36xx *wcn) { + struct wcn36xx_hal_ind_msg *msg, *tmp; + + cancel_work_sync(&wcn->hal_ind_work); destroy_workqueue(wcn->hal_ind_wq); + + list_for_each_entry_safe(msg, tmp, &wcn->hal_ind_queue, list) + kfree(msg); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index 8076edf40ac8..ff15df8ab56f 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -86,6 +86,10 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif, u16 p2p_off); int wcn36xx_smd_switch_channel(struct wcn36xx *wcn, struct ieee80211_vif *vif, int ch); +int wcn36xx_smd_process_ptt_msg(struct wcn36xx *wcn, + struct ieee80211_vif *vif, + void *ptt_msg, size_t len, + void **ptt_rsp_msg); int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn, struct ieee80211_vif *vif, struct sk_buff *skb); @@ -97,6 +101,7 @@ int wcn36xx_smd_set_stakey(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx, u8 keylen, u8 *key); @@ -106,6 +111,7 @@ int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn, u8 sta_index); int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn, enum ani_ed_type enc_type, + u8 bssidx, u8 keyidx); int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c new file mode 100644 index 000000000000..1279064a3b71 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
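
The new wcn36xx_smd_close() above cancels the indication worker, destroys the workqueue, and only then frees whatever is still queued, using the _safe list walk so each node's successor is read before the node is freed. A user-space model of that drain with a plain singly linked list (the kernel uses its doubly linked list helpers, but the "take next before free" idea is the same):

#include <stdio.h>
#include <stdlib.h>

struct ind_msg {
        struct ind_msg *next;
        int id;
};

/* Free every entry still sitting on the queue; caching msg->next before
 * free() is exactly what list_for_each_entry_safe() buys in the kernel. */
static void drain_queue(struct ind_msg **head)
{
        struct ind_msg *msg = *head, *tmp;

        while (msg) {
                tmp = msg->next;
                printf("freeing queued indication %d\n", msg->id);
                free(msg);
                msg = tmp;
        }
        *head = NULL;
}

int main(void)
{
        struct ind_msg *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct ind_msg *m = malloc(sizeof(*m));

                if (!m)
                        return 1;
                m->id = i;
                m->next = head;
                head = m;
        }
        drain_queue(&head);
        return 0;
}
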
+ */ + +#include <net/netlink.h> +#include <linux/firmware.h> +#include <net/cfg80211.h> +#include "wcn36xx.h" + +#include "testmode.h" +#include "testmode_i.h" +#include "hal.h" +#include "smd.h" + +static const struct nla_policy wcn36xx_tm_policy[WCN36XX_TM_ATTR_MAX + 1] = { + [WCN36XX_TM_ATTR_CMD] = { .type = NLA_U16 }, + [WCN36XX_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = WCN36XX_TM_DATA_MAX_LEN }, +}; + +struct build_release_number { + u16 drv_major; + u16 drv_minor; + u16 drv_patch; + u16 drv_build; + u16 ptt_max; + u16 ptt_min; + u16 fw_ver; +} __packed; + +static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif, + struct nlattr *tb[]) +{ + int ret = 0, buf_len; + void *buf; + struct ftm_rsp_msg *msg, *rsp = NULL; + struct sk_buff *skb; + + if (!tb[WCN36XX_TM_ATTR_DATA]) + return -EINVAL; + + buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]); + buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]); + msg = (struct ftm_rsp_msg *)buf; + + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n", + msg->msg_id, msg->msg_body_length, + buf, buf_len); + + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "REQ ", buf, buf_len); + + if (msg->msg_id == MSG_GET_BUILD_RELEASE_NUMBER) { + struct build_release_number *body = + (struct build_release_number *) + msg->msg_response; + + body->drv_major = wcn->fw_major; + body->drv_minor = wcn->fw_minor; + body->drv_patch = wcn->fw_version; + body->drv_build = wcn->fw_revision; + body->ptt_max = 10; + body->ptt_min = 0; + + rsp = msg; + rsp->resp_status = 0; + } else { + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "PPT Request >> HAL size %d\n", + msg->msg_body_length); + + msg->resp_status = wcn36xx_smd_process_ptt_msg(wcn, vif, msg, + msg->msg_body_length, (void *)(&rsp)); + + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "Response status = %d\n", + msg->resp_status); + if (rsp) + wcn36xx_dbg(WCN36XX_DBG_TESTMODE, + "PPT Response << HAL size %d\n", + rsp->msg_body_length); + } + + if (!rsp) { + rsp = msg; + wcn36xx_warn("No response! Echoing request with response status %d\n", + rsp->resp_status); + } + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "RSP ", + rsp, rsp->msg_body_length); + + skb = cfg80211_testmode_alloc_reply_skb(wcn->hw->wiphy, + nla_total_size(msg->msg_body_length)); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ret = nla_put(skb, WCN36XX_TM_ATTR_DATA, rsp->msg_body_length, rsp); + if (ret) { + kfree_skb(skb); + goto out; + } + + ret = cfg80211_testmode_reply(skb); + +out: + if (rsp != msg) + kfree(rsp); + + return ret; +} + +int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct wcn36xx *wcn = hw->priv; + struct nlattr *tb[WCN36XX_TM_ATTR_MAX + 1]; + int ret = 0; + unsigned short attr; + + wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len); + ret = nla_parse(tb, WCN36XX_TM_ATTR_MAX, data, len, + wcn36xx_tm_policy, NULL); + if (ret) + return ret; + + if (!tb[WCN36XX_TM_ATTR_CMD]) + return -EINVAL; + + attr = nla_get_u16(tb[WCN36XX_TM_ATTR_CMD]); + + if (attr != WCN36XX_TM_CMD_PTT) + return -EOPNOTSUPP; + + return wcn36xx_tm_cmd_ptt(wcn, vif, tb); +} diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.h b/drivers/net/wireless/ath/wcn36xx/testmode.h new file mode 100644 index 000000000000..4c6cfdb46580 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
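
wcn36xx_tm_cmd() above validates the testmode blob with a policy and nla_parse(), then requires WCN36XX_TM_ATTR_CMD before dispatching. As a rough standalone model (not the kernel or libnl API: real nlattr headers count themselves in nla_len and pad to 4 bytes), here is the type/length walk such parsing performs, with a hand-rolled TLV format and invented names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { TM_ATTR_CMD = 1, TM_ATTR_DATA = 2, TM_ATTR_MAX = 2 };

struct tlv { uint16_t type; uint16_t len; /* payload follows */ };

/* Walk a buffer of {type,len,payload} records into a lookup table,
 * skipping unknown types, the way nla_parse() populates tb[]. */
static int parse_attrs(const uint8_t *buf, size_t len, const uint8_t *tb[])
{
        size_t off = 0;

        memset(tb, 0, sizeof(*tb) * (TM_ATTR_MAX + 1));
        while (off + sizeof(struct tlv) <= len) {
                struct tlv h;

                memcpy(&h, buf + off, sizeof(h));
                if (off + sizeof(h) + h.len > len)
                        return -1;      /* truncated attribute */
                if (h.type >= 1 && h.type <= TM_ATTR_MAX)
                        tb[h.type] = buf + off + sizeof(h);
                off += sizeof(h) + h.len;
        }
        return 0;
}

int main(void)
{
        uint8_t buf[8];
        struct tlv h = { .type = TM_ATTR_CMD, .len = 2 };
        uint16_t cmd = 3;       /* WCN36XX_TM_CMD_PTT */
        const uint8_t *tb[TM_ATTR_MAX + 1];

        memcpy(buf, &h, sizeof(h));
        memcpy(buf + sizeof(h), &cmd, sizeof(cmd));

        if (parse_attrs(buf, sizeof(h) + 2, tb) == 0 && tb[TM_ATTR_CMD]) {
                memcpy(&cmd, tb[TM_ATTR_CMD], sizeof(cmd));
                printf("testmode cmd %u\n", cmd);
        }
        return 0;
}
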
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "wcn36xx.h" + +struct ftm_rsp_msg { + u16 msg_id; + u16 msg_body_length; + u32 resp_status; + u8 msg_response[0]; +} __packed; + +/* The request buffer of FTM which contains a byte of command and the request */ +struct ftm_payload { + u16 ftm_cmd_type; + struct ftm_rsp_msg ftm_cmd_msg; +} __packed; + +#define MSG_GET_BUILD_RELEASE_NUMBER 0x32A2 + +#ifdef CONFIG_NL80211_TESTMODE +int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else +static inline int wcn36xx_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/wcn36xx/testmode_i.h b/drivers/net/wireless/ath/wcn36xx/testmode_i.h new file mode 100644 index 000000000000..8a1477ffd5a0 --- /dev/null +++ b/drivers/net/wireless/ath/wcn36xx/testmode_i.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
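
For MSG_GET_BUILD_RELEASE_NUMBER, wcn36xx_tm_cmd_ptt() answers in place: the build_release_number body is written straight into msg_response of the request buffer and the same buffer is echoed back as the reply. A self-contained sketch of that framing, with simplified field use and illustrative values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ftm_rsp_msg {
        uint16_t msg_id;
        uint16_t msg_body_length;
        uint32_t resp_status;
        uint8_t msg_response[];         /* body follows the header */
} __attribute__((packed));

struct build_release_number {
        uint16_t drv_major, drv_minor, drv_patch, drv_build;
        uint16_t ptt_max, ptt_min, fw_ver;
} __attribute__((packed));

#define MSG_GET_BUILD_RELEASE_NUMBER 0x32A2

int main(void)
{
        /* Room for the header plus the fixed-size body. */
        uint8_t buf[sizeof(struct ftm_rsp_msg) +
                    sizeof(struct build_release_number)] = { 0 };
        struct ftm_rsp_msg *msg = (struct ftm_rsp_msg *)buf;
        struct build_release_number body = {
                .drv_major = 1, .drv_minor = 4, .ptt_max = 10,
        };

        /* The request is answered in place: write the body after the
         * header and mark the command as successful. */
        msg->msg_id = MSG_GET_BUILD_RELEASE_NUMBER;
        msg->msg_body_length = sizeof(body);
        msg->resp_status = 0;
        memcpy(msg->msg_response, &body, sizeof(body));

        printf("rsp id 0x%04x body %u bytes status %u\n",
               msg->msg_id, msg->msg_body_length, msg->resp_status);
        return 0;
}
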
+ */ + +#define WCN36XX_TM_DATA_MAX_LEN 5000 + +enum wcn36xx_tm_attr { + __WCN36XX_TM_ATTR_INVALID = 0, + WCN36XX_TM_ATTR_CMD = 1, + WCN36XX_TM_ATTR_DATA = 2, + + /* keep last */ + __WCN36XX_TM_ATTR_AFTER_LAST, + WCN36XX_TM_ATTR_MAX = __WCN36XX_TM_ATTR_AFTER_LAST - 1, +}; + +#define WCN36XX_TM_CMD_PTT 3 diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c index b1768ed6b0be..a6902371e89c 100644 --- a/drivers/net/wireless/ath/wcn36xx/txrx.c +++ b/drivers/net/wireless/ath/wcn36xx/txrx.c @@ -273,6 +273,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, bool bcast = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); struct wcn36xx_tx_bd bd; + int ret; memset(&bd, 0, sizeof(bd)); @@ -317,5 +318,17 @@ int wcn36xx_start_tx(struct wcn36xx *wcn, buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32)); bd.tx_bd_sign = 0xbdbdbdbd; - return wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); + ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low); + if (ret && bd.tx_comp) { + /* If the skb has not been transmitted, + * don't keep a reference to it. + */ + spin_lock_irqsave(&wcn->dxe_lock, flags); + wcn->tx_ack_skb = NULL; + spin_unlock_irqrestore(&wcn->dxe_lock, flags); + + ieee80211_wake_queues(wcn->hw); + } + + return ret; } diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 5854adf43f3a..11e74015c79a 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -32,12 +32,6 @@ #define WLAN_NV_FILE "wlan/prima/WCNSS_qcom_wlan_nv.bin" #define WCN36XX_AGGR_BUFFER_SIZE 64 -/* How many frames until we start a-mpdu TX session */ -#define WCN36XX_AMPDU_START_THRESH 20 - -#define WCN36XX_MAX_SCAN_SSIDS 9 -#define WCN36XX_MAX_SCAN_IE_LEN 500 - extern unsigned int wcn36xx_dbg_mask; enum wcn36xx_debug_mask { @@ -56,6 +50,8 @@ enum wcn36xx_debug_mask { WCN36XX_DBG_BEACON_DUMP = 0x00001000, WCN36XX_DBG_PMC = 0x00002000, WCN36XX_DBG_PMC_DUMP = 0x00004000, + WCN36XX_DBG_TESTMODE = 0x00008000, + WCN36XX_DBG_TESTMODE_DUMP = 0x00010000, WCN36XX_DBG_ANY = 0xffffffff, }; @@ -123,6 +119,7 @@ struct wcn36xx_vif { bool is_joining; bool sta_assoc; struct wcn36xx_hal_mac_ssid ssid; + enum wcn36xx_hal_bss_type bss_type; /* Power management */ enum wcn36xx_power_state pw_state; diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index b448926b0c0f..3548e8d5e18e 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -33,7 +33,7 @@ config WIL6210_TRACING bool "wil6210 tracing support" depends on WIL6210 depends on EVENT_TRACING - default y + default n ---help--- Say Y here to enable tracepoints for the wil6210 driver using the kernel tracing infrastructure. 
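
The txrx.c hunk above fixes a wedge: wcn36xx_start_tx() stashes the skb in tx_ack_skb (and stops the queues) when a TX-status ack is expected, so if the DMA submit then fails, the stashed pointer must be dropped and the queues woken, because the status callback that would normally do both will never fire. A user-space model of that failure path, with invented names and the DMA call stubbed to fail:

#include <stdio.h>

struct dev {
        void *tx_ack_skb;       /* frame we expect a TX-status ack for */
        int queues_stopped;
};

static int dxe_tx_frame(void) { return -1; }    /* simulate DMA failure */

static int start_tx(struct dev *d, void *skb, int want_ack)
{
        int ret;

        if (want_ack) {
                d->tx_ack_skb = skb;    /* stash for the status callback */
                d->queues_stopped = 1;  /* one ack-tracked frame at a time */
        }
        ret = dxe_tx_frame();
        if (ret && want_ack) {
                /* The frame never reached hardware, so the status callback
                 * will never fire: drop the stale reference and let traffic
                 * flow again instead of wedging the queue. */
                d->tx_ack_skb = NULL;
                d->queues_stopped = 0;
        }
        return ret;
}

int main(void)
{
        struct dev d = { 0 };
        int frame;

        start_tx(&d, &frame, 1);
        printf("ack_skb=%p queues_stopped=%d\n", d.tx_ack_skb,
               d.queues_stopped);
        return 0;
}
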
Select this diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index cdbb393863f3..78946f28d0c7 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -276,6 +276,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, struct wil_net_stats *stats = &wil->sta[cid].stats; int rc; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_NOTIFY_REQ_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1081,17 +1083,11 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, u64 *cookie) { const u8 *buf = params->buf; - size_t len = params->len, total; + size_t len = params->len; struct wil6210_priv *wil = wiphy_to_wil(wiphy); struct wil6210_vif *vif = wdev_to_vif(wil, wdev); int rc; - bool tx_status = false; - struct ieee80211_mgmt *mgmt_frame = (void *)buf; - struct wmi_sw_tx_req_cmd *cmd; - struct { - struct wmi_cmd_hdr wmi; - struct wmi_sw_tx_complete_event evt; - } __packed evt; + bool tx_status; /* Note, currently we do not support the "wait" parameter, user-space * must call remain_on_channel before mgmt_tx or listen on a channel @@ -1100,34 +1096,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, * different from currently "listened" channel and fail if it is. */ - wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); - wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, - len, true); - - if (len < sizeof(struct ieee80211_hdr_3addr)) - return -EINVAL; - - total = sizeof(*cmd) + len; - if (total < len) - return -EINVAL; - - cmd = kmalloc(total, GFP_KERNEL); - if (!cmd) { - rc = -ENOMEM; - goto out; - } - - memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); - cmd->len = cpu_to_le16(len); - memcpy(cmd->payload, buf, len); - - rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, - WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); - if (rc == 0) - tx_status = !evt.evt.status; + rc = wmi_mgmt_tx(vif, buf, len); + tx_status = (rc == 0); - kfree(cmd); - out: cfg80211_mgmt_tx_status(wdev, cookie ? 
*cookie : 0, buf, len, tx_status, GFP_KERNEL); return rc; @@ -2277,7 +2248,9 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_get_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; struct nlattr *nl_cfgs, *nl_cfg; u32 i; @@ -2323,7 +2296,6 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy, cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2398,7 +2370,9 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_set_rf_sector_params_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct nlattr *nl_cfg; struct wmi_rf_sector_info *si; @@ -2481,7 +2455,6 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy, } cmd.rf_modules_vec = rf_modules_vec & 0xFF; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID, &reply, sizeof(reply), @@ -2505,7 +2478,9 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, struct { struct wmi_cmd_hdr wmi; struct wmi_get_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; struct sk_buff *msg; if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities)) @@ -2545,7 +2520,6 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy, memset(&cmd, 0, sizeof(cmd)); cmd.cid = (u8)cid; cmd.sector_type = sector_type; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, @@ -2586,14 +2560,15 @@ static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_set_selected_rf_sector_index_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR}, + }; int rc; memset(&cmd, 0, sizeof(cmd)); cmd.sector_idx = cpu_to_le16(sector_index); cmd.sector_type = sector_type; cmd.cid = (u8)cid; - memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID, mid, &cmd, sizeof(cmd), WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID, diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 8c90b3111f0b..ebfdff4d328c 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1078,6 +1078,8 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) struct wmi_notify_req_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { u32 status; @@ -1200,8 +1202,12 @@ static const struct file_operations fops_freq = { static int wil_link_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; - struct station_info sinfo; - int i, rc; + struct station_info *sinfo; + int i, rc = 0; + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { struct 
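
A large share of the wil6210 hunks in this diff follow one recipe: every on-stack wmi_call() reply is now either memset to zero or seeded with a designated initializer whose status field defaults to an error code. A short sketch of why the failure default matters when the firmware event may be shorter than the reply struct (names and sizes invented):

#include <stdio.h>
#include <string.h>

#define FW_STATUS_SUCCESS 0
#define FW_STATUS_FAILURE 1

struct reply { unsigned char status; unsigned char data[3]; };

/* Simulated wmi_call(): the firmware may answer with fewer bytes than
 * sizeof(reply), leaving the tail of the caller's struct untouched. */
static void fake_wmi_call(struct reply *r, const void *evt, size_t evt_len)
{
        memcpy(r, evt, evt_len);
}

int main(void)
{
        /* Seeding status with FAILURE means a short or missing event body
         * reads as failure, never as leftover stack bytes that happen to
         * equal SUCCESS. */
        struct reply r = { .status = FW_STATUS_FAILURE };
        unsigned char no_evt[1];

        fake_wmi_call(&r, no_evt, 0);
        printf("status=%u (%s)\n", r.status,
               r.status == FW_STATUS_SUCCESS ? "success" : "failure");
        return 0;
}
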
wil_sta_info *p = &wil->sta[i]; @@ -1229,19 +1235,21 @@ static int wil_link_debugfs_show(struct seq_file *s, void *data) vif = (mid < wil->max_vifs) ? wil->vifs[mid] : NULL; if (vif) { - rc = wil_cid_fill_sinfo(vif, i, &sinfo); + rc = wil_cid_fill_sinfo(vif, i, sinfo); if (rc) - return rc; + goto out; - seq_printf(s, " Tx_mcs = %d\n", sinfo.txrate.mcs); - seq_printf(s, " Rx_mcs = %d\n", sinfo.rxrate.mcs); - seq_printf(s, " SQ = %d\n", sinfo.signal); + seq_printf(s, " Tx_mcs = %d\n", sinfo->txrate.mcs); + seq_printf(s, " Rx_mcs = %d\n", sinfo->rxrate.mcs); + seq_printf(s, " SQ = %d\n", sinfo->signal); } else { seq_puts(s, " INVALID MID\n"); } } - return 0; +out: + kfree(sinfo); + return rc; } static int wil_link_seq_open(struct inode *inode, struct file *file) @@ -1377,8 +1385,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r) u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size; unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old; - seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout, - r->head_seq_num); + seq_printf(s, "([%2d]) 0x%03x [", r->buf_size, r->head_seq_num); for (i = 0; i < r->buf_size; i++) { if (i == index) seq_printf(s, "%c", r->reorder_buf[i] ? 'O' : '|'); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a4b413e8d55a..e7006c2428a0 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -342,6 +342,8 @@ void wil_disconnect_worker(struct work_struct *work) /* already disconnected */ return; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, vif->mid, NULL, 0, WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), WIL6210_DISCONNECT_TO_MS); @@ -391,7 +393,7 @@ static void wil_fw_error_worker(struct work_struct *work) struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct net_device *ndev = wil->main_ndev; - struct wireless_dev *wdev = ndev->ieee80211_ptr; + struct wireless_dev *wdev; wil_dbg_misc(wil, "fw error worker\n"); @@ -399,6 +401,7 @@ static void wil_fw_error_worker(struct work_struct *work) wil_info(wil, "No recovery - interface is down\n"); return; } + wdev = ndev->ieee80211_ptr; /* increment @recovery_count if less then WIL6210_FW_RECOVERY_TO * passed since last recovery attempt diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 05e9408e7ea3..eb6c14ed65a4 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -457,16 +457,16 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid) return; } + mutex_lock(&wil->mutex); + wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); + mutex_unlock(&wil->mutex); + ndev = vif_to_ndev(vif); /* during unregister_netdevice cfg80211_leave may perform operations * such as stop AP, disconnect, so we only clear the VIF afterwards */ unregister_netdevice(ndev); - mutex_lock(&wil->mutex); - wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); - mutex_unlock(&wil->mutex); - if (any_active && vif->mid != 0) wmi_port_delete(wil, vif->mid); diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 14dcb0698dee..76f8084c1fd8 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -206,7 +206,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) /* put the frame in the 
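
The debugfs hunk above (and the wmi_evt_connect() one further down) moves struct station_info off the stack with kzalloc(), since the struct has grown large enough to threaten the kernel's small fixed stack budget. A trivial user-space analogue of the shape of that change, with an invented size:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct station_info, grown too large for a stack frame. */
struct station_info { char big[2048]; int signal; };

static int link_show(void)
{
        struct station_info *sinfo;

        /* kzalloc() analogue: one zeroed heap allocation instead of a
         * 2 KiB stack object, freed on every exit path. */
        sinfo = calloc(1, sizeof(*sinfo));
        if (!sinfo)
                return -1;

        sinfo->signal = -40;
        printf("SQ = %d\n", sinfo->signal);

        free(sinfo);
        return 0;
}

int main(void) { return link_show(); }
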
reordering buffer */ r->reorder_buf[index] = skb; - r->reorder_time[index] = jiffies; r->stored_mpdu_num++; wil_reorder_release(ndev, r); @@ -252,11 +251,8 @@ struct wil_tid_ampdu_rx *wil_tid_ampdu_rx_alloc(struct wil6210_priv *wil, r->reorder_buf = kcalloc(size, sizeof(struct sk_buff *), GFP_KERNEL); - r->reorder_time = - kcalloc(size, sizeof(unsigned long), GFP_KERNEL); - if (!r->reorder_buf || !r->reorder_time) { + if (!r->reorder_buf) { kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); return NULL; } @@ -286,7 +282,6 @@ void wil_tid_ampdu_rx_free(struct wil6210_priv *wil, kfree_skb(r->reorder_buf[i]); kfree(r->reorder_buf); - kfree(r->reorder_time); kfree(r); } diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index b60b9fcaaebd..b9a9fa828961 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -652,8 +652,8 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) v->swtail = next_tail) { rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); if (unlikely(rc)) { - wil_err(wil, "Error %d in wil_rx_refill[%d]\n", - rc, v->swtail); + wil_err_ratelimited(wil, "Error %d in rx refill[%d]\n", + rc, v->swtail); break; } } @@ -963,7 +963,9 @@ int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; @@ -1045,7 +1047,9 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) struct { struct wmi_cmd_hdr wmi; struct wmi_vring_cfg_done_event cmd; - } __packed reply; + } __packed reply = { + .cmd = {.status = WMI_FW_STATUS_FAILURE}, + }; struct vring *vring = &wil->vring_tx[id]; struct vring_tx_data *txdata = &wil->vring_tx_data[id]; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index f9c5155025bc..b623510c6f6c 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -493,38 +493,28 @@ struct pci_dev; * struct tid_ampdu_rx - TID aggregation information (Rx). * * @reorder_buf: buffer to reorder incoming aggregated MPDUs - * @reorder_time: jiffies when skb was added - * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) - * @reorder_timer: releases expired frames from the reorder buffer. * @last_rx: jiffies of last rx activity * @head_seq_num: head sequence number in reordering buffer. * @stored_mpdu_num: number of MPDUs in reordering buffer * @ssn: Starting Sequence Number expected to be aggregated. * @buf_size: buffer size for incoming A-MPDUs - * @timeout: reset timer value (in TUs). 
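
The rx_reorder.c hunks drop the per-frame reorder_time array along with the long-dead session/reorder timers, so release from the A-MPDU reorder buffer is driven purely by in-order arrival at the head of the window. A minimal model of that ring indexing, assuming ssn = 0 so sequence numbers map directly to slots (the driver indexes by (seq - ssn) & 0xfff modulo buf_size):

#include <stdio.h>

#define BUF_SIZE 4
#define SEQ_MASK 0xfff

static int ring[BUF_SIZE];              /* 0 = empty slot */
static unsigned short head_seq;         /* next sequence owed to the stack */

/* Release every in-order frame starting from head_seq, stopping at the
 * first hole, as wil_reorder_release() does. */
static void release_in_order(void)
{
        for (;;) {
                unsigned idx = (head_seq & SEQ_MASK) % BUF_SIZE;

                if (!ring[idx])
                        break;
                printf("deliver seq %u\n", head_seq);
                ring[idx] = 0;
                head_seq = (head_seq + 1) & SEQ_MASK;
        }
}

static void rx_frame(unsigned short seq)
{
        ring[(seq & SEQ_MASK) % BUF_SIZE] = 1;  /* park it */
        release_in_order();
}

int main(void)
{
        rx_frame(1);    /* out of order: held */
        rx_frame(2);    /* still held */
        rx_frame(0);    /* fills the gap: 0, 1, 2 all delivered */
        return 0;
}
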
* @ssn_last_drop: SSN of the last dropped frame * @total: total number of processed incoming frames * @drop_dup: duplicate frames dropped for this reorder buffer * @drop_old: old frames dropped for this reorder buffer - * @dialog_token: dialog token for aggregation session * @first_time: true when this buffer used 1-st time */ struct wil_tid_ampdu_rx { struct sk_buff **reorder_buf; - unsigned long *reorder_time; - struct timer_list session_timer; - struct timer_list reorder_timer; unsigned long last_rx; u16 head_seq_num; u16 stored_mpdu_num; u16 ssn; u16 buf_size; - u16 timeout; u16 ssn_last_drop; unsigned long long total; /* frames processed */ unsigned long long drop_dup; unsigned long long drop_old; - u8 dialog_token; bool first_time; /* is it 1-st time this buffer used? */ }; @@ -986,7 +976,7 @@ int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, int wmi_send(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len); void wmi_recv_cmd(struct wil6210_priv *wil); int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec); + u16 reply_id, void *reply, u16 reply_size, int to_msec); void wmi_event_worker(struct work_struct *work); void wmi_event_flush(struct wil6210_priv *wil); int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid); @@ -1150,5 +1140,6 @@ void wil6210_clear_halp(struct wil6210_priv *wil); int wmi_start_sched_scan(struct wil6210_priv *wil, struct cfg80211_sched_scan_request *request); int wmi_stop_sched_scan(struct wil6210_priv *wil); +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index a3dda9a97c1f..5d991243cdb5 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -824,7 +824,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) struct wireless_dev *wdev = vif_to_wdev(vif); struct wmi_connect_event *evt = d; int ch; /* channel number */ - struct station_info sinfo; + struct station_info *sinfo; u8 *assoc_req_ie, *assoc_resp_ie; size_t assoc_req_ielen, assoc_resp_ielen; /* capinfo(u16) + listen_interval(u16) + IEs */ @@ -940,6 +940,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) vif->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (rc) { if (disable_ap_sme) /* notify new_sta has failed */ @@ -947,16 +948,22 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) goto out; } - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) { + rc = -ENOMEM; + goto out; + } - sinfo.generation = wil->sinfo_gen++; + sinfo->generation = wil->sinfo_gen++; if (assoc_req_ie) { - sinfo.assoc_req_ies = assoc_req_ie; - sinfo.assoc_req_ies_len = assoc_req_ielen; + sinfo->assoc_req_ies = assoc_req_ie; + sinfo->assoc_req_ies_len = assoc_req_ielen; } - cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); + cfg80211_new_sta(ndev, evt->bssid, sinfo, GFP_KERNEL); + + kfree(sinfo); } else { wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype, evt->cid); @@ -1416,7 +1423,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) } int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, - u16 reply_id, void *reply, u8 reply_size, int to_msec) + u16 reply_id, void *reply, u16 reply_size, int to_msec) { 
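
The wmi_call() prototype change above widens reply_size from u8 to u16: some reply structs (the RF sector events, for instance) are larger than 255 bytes, and an 8-bit parameter silently truncates sizeof(reply) at the call site. A two-minute demonstration of the truncation, with an invented reply layout:

#include <stdint.h>
#include <stdio.h>

struct big_reply { uint8_t hdr[16]; uint8_t payload[300]; };

int main(void)
{
        uint8_t  as_u8  = (uint8_t)sizeof(struct big_reply);
        uint16_t as_u16 = (uint16_t)sizeof(struct big_reply);

        /* 316 does not fit in a u8: the old prototype would have told
         * wmi_call() to copy only 60 bytes of a 316-byte event. */
        printf("sizeof(reply)=%zu  u8=%u  u16=%u\n",
               sizeof(struct big_reply), as_u8, as_u16);
        return 0;
}
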
int rc; unsigned long remain; @@ -1509,7 +1516,9 @@ int wmi_led_cfg(struct wil6210_priv *wil, bool enable) struct { struct wmi_cmd_hdr wmi; struct wmi_led_cfg_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_FW_STATUS_FAILURE)}, + }; if (led_id == WIL_LED_INVALID_ID) goto out; @@ -1554,13 +1563,17 @@ int wmi_pcp_start(struct wil6210_vif *vif, .pcp_max_assoc_sta = max_assoc_sta, .hidden_ssid = hidden_ssid, .is_go = is_go, - .disable_ap_sme = disable_ap_sme, + .ap_sme_offload_mode = disable_ap_sme ? + WMI_AP_SME_OFFLOAD_PARTIAL : + WMI_AP_SME_OFFLOAD_FULL, .abft_len = wil->abft_len, }; struct { struct wmi_cmd_hdr wmi; struct wmi_pcp_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; if (!vif->privacy) cmd.disable_sec = 1; @@ -1574,7 +1587,7 @@ int wmi_pcp_start(struct wil6210_vif *vif, } if (disable_ap_sme && - !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME, + !test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL, wil->fw_capabilities)) { wil_err(wil, "disable_ap_sme not supported by FW\n"); return -EOPNOTSUPP; @@ -1637,6 +1650,8 @@ int wmi_get_ssid(struct wil6210_vif *vif, u8 *ssid_len, void *ssid) } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_SSID_CMDID, vif->mid, NULL, 0, WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1672,6 +1687,8 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel) struct wmi_set_pcp_channel_cmd cmd; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, vif->mid, NULL, 0, WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); if (rc) @@ -1697,7 +1714,9 @@ int wmi_p2p_cfg(struct wil6210_vif *vif, int channel, int bi) struct { struct wmi_cmd_hdr wmi; struct wmi_p2p_cfg_done_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n"); @@ -1718,7 +1737,9 @@ int wmi_start_listen(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n"); @@ -1740,7 +1761,9 @@ int wmi_start_search(struct wil6210_vif *vif) struct { struct wmi_cmd_hdr wmi; struct wmi_search_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n"); @@ -1866,7 +1889,9 @@ int wmi_rxon(struct wil6210_priv *wil, bool on) struct { struct wmi_cmd_hdr wmi; struct wmi_listen_started_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_info(wil, "(%s)\n", on ? 
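
wmi_pcp_start() above now maps the boolean disable_ap_sme module knob onto the new ap_sme_offload_mode enum, and keeps refusing partial offload unless the firmware advertised the (renamed) capability bit. A small sketch of that capability gate, with a hand-rolled test_bit over a capability bitmap (names invented, -95 standing in for -EOPNOTSUPP):

#include <limits.h>
#include <stdio.h>

#define WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL 4

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static int test_bit(unsigned nr, const unsigned long *addr)
{
        return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static int pcp_start(int disable_ap_sme, const unsigned long *fw_caps)
{
        /* Partial offload is only selectable when the firmware advertised
         * the capability; otherwise refuse rather than send a mode the
         * firmware will misparse. */
        if (disable_ap_sme &&
            !test_bit(WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL, fw_caps)) {
                printf("disable_ap_sme not supported by FW\n");
                return -95;
        }
        printf("ap_sme_offload_mode = %s\n",
               disable_ap_sme ? "PARTIAL" : "FULL");
        return 0;
}

int main(void)
{
        unsigned long caps_old[1] = { 0 };
        unsigned long caps_new[1] = { 1UL << 4 };

        pcp_start(1, caps_old);
        pcp_start(1, caps_new);
        return 0;
}
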
"on" : "off"); @@ -1908,6 +1933,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) } __packed evt; int rc; + memset(&evt, 0, sizeof(evt)); + if (wdev->iftype == NL80211_IFTYPE_MONITOR) { struct ieee80211_channel *ch = wil->monitor_chandef.chan; @@ -1937,14 +1964,14 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) if (rc) return rc; + if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) + rc = -EINVAL; + vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", le32_to_cpu(evt.evt.status), vring->hwtail); - if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) - rc = -EINVAL; - return rc; } @@ -1962,6 +1989,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) struct wmi_temp_sense_done_event evt; } __packed reply; + memset(&reply, 0, sizeof(reply)); + rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) @@ -1994,6 +2023,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); + memset(&reply, 0, sizeof(reply)); vif->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); @@ -2092,7 +2122,9 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_rcp_addba_resp_sent_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, + }; wil_dbg_wmi(wil, "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n", @@ -2125,13 +2157,13 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_ps_dev_profile_cfg_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR)}, + }; u32 status; wil_dbg_wmi(wil, "Setting ps dev profile %d\n", ps_profile); - reply.evt.status = cpu_to_le32(WMI_PS_CFG_CMD_STATUS_ERROR); - rc = wmi_call(wil, WMI_PS_DEV_PROFILE_CFG_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_PS_DEV_PROFILE_CFG_EVENTID, &reply, sizeof(reply), @@ -2160,15 +2192,15 @@ int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short) struct { struct wmi_cmd_hdr wmi; struct wmi_set_mgmt_retry_limit_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_wmi(wil, "Setting mgmt retry short %d\n", retry_short); if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_SET_MGMT_RETRY_LIMIT_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_SET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), @@ -2199,7 +2231,7 @@ int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short) if (!test_bit(WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.mgmt_retry_limit = 0; + memset(&reply, 0, sizeof(reply)); rc = wmi_call(wil, WMI_GET_MGMT_RETRY_LIMIT_CMDID, vif->mid, NULL, 0, WMI_GET_MGMT_RETRY_LIMIT_EVENTID, &reply, sizeof(reply), 100); @@ -2282,14 +2314,15 @@ int wmi_suspend(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_suspend_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE}, + }; + u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP; wil->suspend_resp_rcvd = false; wil->suspend_resp_comp = false; - 
reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE; - rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply), @@ -2365,10 +2398,11 @@ int wmi_resume(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_traffic_resume_event evt; - } __packed reply; - - reply.evt.status = WMI_TRAFFIC_RESUME_FAILED; - reply.evt.resume_triggers = WMI_RESUME_TRIGGER_UNKNOWN; + } __packed reply = { + .evt = {.status = WMI_TRAFFIC_RESUME_FAILED, + .resume_triggers = + cpu_to_le32(WMI_RESUME_TRIGGER_UNKNOWN)}, + }; rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, vif->mid, NULL, 0, WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply), @@ -2394,7 +2428,9 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, struct { struct wmi_cmd_hdr wmi; struct wmi_port_allocated_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port allocate, mid %d iftype %d, mac %pM\n", mid, iftype, mac); @@ -2419,8 +2455,6 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, return -EINVAL; } - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_ALLOCATE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_ALLOCATED_EVENTID, &reply, @@ -2447,12 +2481,12 @@ int wmi_port_delete(struct wil6210_priv *wil, u8 mid) struct { struct wmi_cmd_hdr wmi; struct wmi_port_deleted_event evt; - } __packed reply; + } __packed reply = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; wil_dbg_misc(wil, "port delete, mid %d\n", mid); - reply.evt.status = WMI_FW_STATUS_FAILURE; - rc = wmi_call(wil, WMI_PORT_DELETE_CMDID, mid, &cmd, sizeof(cmd), WMI_PORT_DELETED_EVENTID, &reply, @@ -2709,7 +2743,9 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, struct { struct wmi_cmd_hdr wmi; struct wmi_start_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return -ENOTSUPP; @@ -2725,8 +2761,6 @@ int wmi_start_sched_scan(struct wil6210_priv *wil, wmi_sched_scan_set_plans(wil, &cmd, request->scan_plans, request->n_scan_plans); - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, vif->mid, &cmd, sizeof(cmd), WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply), @@ -2750,13 +2784,13 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) struct { struct wmi_cmd_hdr wmi; struct wmi_stop_sched_scan_event evt; - } __packed reply; + } __packed reply = { + .evt = {.result = WMI_PNO_REJECT}, + }; if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) return -ENOTSUPP; - reply.evt.result = WMI_PNO_REJECT; - rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, vif->mid, NULL, 0, WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS); @@ -2771,3 +2805,50 @@ int wmi_stop_sched_scan(struct wil6210_priv *wil) return 0; } + +int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len) +{ + size_t total; + struct wil6210_priv *wil = vif_to_wil(vif); + struct ieee80211_mgmt *mgmt_frame = (void *)buf; + struct wmi_sw_tx_req_cmd *cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_sw_tx_complete_event evt; + } __packed evt = { + .evt = {.status = WMI_FW_STATUS_FAILURE}, + }; + int rc; + + wil_dbg_misc(wil, "mgmt_tx mid %d\n", vif->mid); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); + + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + + total = sizeof(*cmd) + len; + if (total < len) { 
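
The new wmi_mgmt_tx() helper builds a variable-length command in one allocation: a fixed header plus the frame as a flexible-array tail, with the sizeof(*cmd) + len wrap check up front (when the addition overflows, the result comes out smaller than len itself). A standalone sketch of both the check and the build, with invented names and simplified byte order:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sw_tx_req_cmd {
        uint8_t dst_mac[6];
        uint16_t len;           /* little-endian on the wire in the driver */
        uint8_t payload[];      /* frame body follows the fixed header */
};

/* One allocation sized header + frame, rejecting wrapped arithmetic. */
static struct sw_tx_req_cmd *build_sw_tx(const uint8_t *frame, size_t len,
                                         size_t *total)
{
        struct sw_tx_req_cmd *cmd;

        *total = sizeof(*cmd) + len;
        if (*total < len)       /* size arithmetic wrapped: reject */
                return NULL;

        cmd = malloc(*total);
        if (!cmd)
                return NULL;

        memcpy(cmd->dst_mac, frame + 4, 6);     /* 802.11 addr1 (DA) offset */
        cmd->len = (uint16_t)len;
        memcpy(cmd->payload, frame, len);
        return cmd;
}

int main(void)
{
        uint8_t frame[24] = { 0xd0 };   /* bare 3-address mgmt header */
        size_t total;
        struct sw_tx_req_cmd *cmd = build_sw_tx(frame, sizeof(frame), &total);

        if (cmd) {
                printf("%zu-byte command for a %u-byte frame\n",
                       total, cmd->len);
                free(cmd);
        }
        return 0;
}
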
+ wil_err(wil, "mgmt_tx invalid len %zu\n", len); + return -EINVAL; + } + + cmd = kmalloc(total, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memcpy(cmd->dst_mac, mgmt_frame->da, WMI_MAC_LEN); + cmd->len = cpu_to_le16(len); + memcpy(cmd->payload, buf, len); + + rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total, + WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000); + if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) { + wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status); + rc = -EINVAL; + } + + kfree(cmd); + + return rc; +} diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index d3e75f0ff245..dc503d903786 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1,4 +1,5 @@ /* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. * Copyright (c) 2006-2012 Wilocity * @@ -29,8 +30,6 @@ #ifndef __WILOCITY_WMI_H__ #define __WILOCITY_WMI_H__ -/* General */ -#define WMI_MAX_ASSOC_STA (8) #define WMI_DEFAULT_ASSOC_STA (1) #define WMI_MAC_LEN (6) #define WMI_PROX_RANGE_NUM (3) @@ -41,6 +40,19 @@ #define WMI_RF_ETYPE_LENGTH (3) #define WMI_RF_RX2TX_LENGTH (3) #define WMI_RF_ETYPE_VAL_PER_RANGE (5) +/* DTYPE configuration array size + * must always be kept equal to (WMI_RF_DTYPE_LENGTH+1) + */ +#define WMI_RF_DTYPE_CONF_LENGTH (4) +/* ETYPE configuration array size + * must always be kept equal to + * (WMI_RF_ETYPE_LENGTH+WMI_RF_ETYPE_VAL_PER_RANGE) + */ +#define WMI_RF_ETYPE_CONF_LENGTH (8) +/* RX2TX configuration array size + * must always be kept equal to (WMI_RF_RX2TX_LENGTH+1) + */ +#define WMI_RF_RX2TX_CONF_LENGTH (4) /* Mailbox interface * used for commands and events @@ -61,7 +73,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_PS_CONFIG = 1, WMI_FW_CAPABILITY_RF_SECTORS = 2, WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3, - WMI_FW_CAPABILITY_DISABLE_AP_SME = 4, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_PARTIAL = 4, WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, WMI_FW_CAPABILITY_D3_SUSPEND = 8, @@ -73,6 +85,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14, WMI_FW_CAPABILITY_PNO = 15, WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18, + WMI_FW_CAPABILITY_AP_SME_OFFLOAD_NONE = 19, WMI_FW_CAPABILITY_MAX, }; @@ -164,12 +177,14 @@ enum wmi_command_id { WMI_SET_ACTIVE_SILENT_RSSI_TABLE_CMDID = 0x85C, WMI_RF_PWR_ON_DELAY_CMDID = 0x85D, WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID = 0x85E, + WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID = 0x85F, /* Performance monitoring commands */ WMI_BF_CTRL_CMDID = 0x862, WMI_NOTIFY_REQ_CMDID = 0x863, WMI_GET_STATUS_CMDID = 0x864, WMI_GET_RF_STATUS_CMDID = 0x866, WMI_GET_BASEBAND_TYPE_CMDID = 0x867, + WMI_VRING_SWITCH_TIMING_CONFIG_CMDID = 0x868, WMI_UNIT_TEST_CMDID = 0x900, WMI_FLASH_READ_CMDID = 0x902, WMI_FLASH_WRITE_CMDID = 0x903, @@ -202,6 +217,7 @@ enum wmi_command_id { WMI_GET_THERMAL_THROTTLING_CFG_CMDID = 0x941, /* Read Power Save profile type */ WMI_PS_DEV_PROFILE_CFG_READ_CMDID = 0x942, + WMI_TSF_SYNC_CMDID = 0x973, WMI_TOF_SESSION_START_CMDID = 0x991, WMI_TOF_GET_CAPABILITIES_CMDID = 0x992, WMI_TOF_SET_LCR_CMDID = 0x993, @@ -218,11 +234,16 @@ enum wmi_command_id { WMI_PRIO_TX_SECTORS_ORDER_CMDID = 0x9A5, WMI_PRIO_TX_SECTORS_NUMBER_CMDID = 0x9A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_CMDID = 0x9A7, + WMI_BF_CONTROL_CMDID = 0x9AA, WMI_SCHEDULING_SCHEME_CMDID = 0xA01, WMI_FIXED_SCHEDULING_CONFIG_CMDID = 0xA02, WMI_ENABLE_FIXED_SCHEDULING_CMDID = 0xA03, 
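
The new WMI_RF_*_CONF_LENGTH macros above carry their invariants only in comments ("must always be kept equal to ..."). As a sketch of how such couplings can be pinned down at compile time (the kernel header itself does not do this; the macro values below are copied from the hunk):

#include <assert.h>

#define WMI_RF_DTYPE_LENGTH        (3)
#define WMI_RF_ETYPE_LENGTH        (3)
#define WMI_RF_RX2TX_LENGTH        (3)
#define WMI_RF_ETYPE_VAL_PER_RANGE (5)

#define WMI_RF_DTYPE_CONF_LENGTH (4)
#define WMI_RF_ETYPE_CONF_LENGTH (8)
#define WMI_RF_RX2TX_CONF_LENGTH (4)

/* Spelling the documented relationships out as compile-time checks makes
 * a future size drift un-compilable instead of a silent wire-format bug. */
_Static_assert(WMI_RF_DTYPE_CONF_LENGTH == WMI_RF_DTYPE_LENGTH + 1,
               "DTYPE conf array must cover one more entry than ranges");
_Static_assert(WMI_RF_ETYPE_CONF_LENGTH ==
               WMI_RF_ETYPE_LENGTH + WMI_RF_ETYPE_VAL_PER_RANGE,
               "ETYPE conf array size drifted");
_Static_assert(WMI_RF_RX2TX_CONF_LENGTH == WMI_RF_RX2TX_LENGTH + 1,
               "RX2TX conf array size drifted");

int main(void) { return 0; }
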
WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_CMDID = 0xA04, WMI_SET_LONG_RANGE_CONFIG_CMDID = 0xA05, + WMI_GET_ASSOC_LIST_CMDID = 0xA06, + WMI_GET_CCA_INDICATIONS_CMDID = 0xA07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID = 0xA08, + WMI_INTERNAL_FW_IOCTL_CMDID = 0xA0B, WMI_SET_MAC_ADDRESS_CMDID = 0xF003, WMI_ABORT_SCAN_CMDID = 0xF007, WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041, @@ -484,6 +505,18 @@ enum wmi_rf_mgmt_type { WMI_RF_MGMT_GET_STATUS = 0x02, }; +/* WMI_BF_CONTROL_CMDID */ +enum wmi_bf_triggers { + WMI_BF_TRIGGER_RS_MCS1_TH_FAILURE = 0x01, + WMI_BF_TRIGGER_RS_MCS1_NO_BACK_FAILURE = 0x02, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP = 0x04, + WMI_BF_TRIGGER_MAX_BACK_FAILURE = 0x08, + WMI_BF_TRIGGER_FW = 0x10, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_KEEP_ALIVE = 0x20, + WMI_BF_TRIGGER_AOA = 0x40, + WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_UPM = 0x80, +}; + /* WMI_RF_MGMT_CMDID */ struct wmi_rf_mgmt_cmd { __le32 rf_mgmt_type; @@ -519,7 +552,9 @@ struct wmi_bcon_ctrl_cmd { u8 disable_sec; u8 hidden_ssid; u8 is_go; - u8 reserved[2]; + /* A-BFT length override if non-0 */ + u8 abft_len; + u8 reserved; } __packed; /* WMI_PORT_ALLOCATE_CMDID */ @@ -585,6 +620,16 @@ struct wmi_power_mgmt_cfg_cmd { } __packed; /* WMI_PCP_START_CMDID */ +enum wmi_ap_sme_offload_mode { + /* Full AP SME in FW */ + WMI_AP_SME_OFFLOAD_FULL = 0x00, + /* Probe AP SME in FW */ + WMI_AP_SME_OFFLOAD_PARTIAL = 0x01, + /* AP SME in host */ + WMI_AP_SME_OFFLOAD_NONE = 0x02, +}; + +/* WMI_PCP_START_CMDID */ struct wmi_pcp_start_cmd { __le16 bcon_interval; u8 pcp_max_assoc_sta; @@ -593,7 +638,8 @@ struct wmi_pcp_start_cmd { u8 reserved0[5]; /* A-BFT length override if non-0 */ u8 abft_len; - u8 disable_ap_sme; + /* enum wmi_ap_sme_offload_mode_e */ + u8 ap_sme_offload_mode; u8 network_type; u8 channel; u8 disable_sec_offload; @@ -607,6 +653,17 @@ struct wmi_sw_tx_req_cmd { u8 payload[0]; } __packed; +/* WMI_VRING_SWITCH_TIMING_CONFIG_CMDID */ +struct wmi_vring_switch_timing_config_cmd { + /* Set vring timing configuration: + * + * defined interval for vring switch + */ + __le32 interval_usec; + /* vring inactivity threshold */ + __le32 idle_th_usec; +} __packed; + struct wmi_sw_ring_cfg { __le64 ring_mem_base; __le16 ring_size; @@ -642,6 +699,7 @@ enum wmi_vring_cfg_schd_params_priority { WMI_SCH_PRIO_HIGH = 0x01, }; +#define CIDXTID_EXTENDED_CID_TID (0xFF) #define CIDXTID_CID_POS (0) #define CIDXTID_CID_LEN (4) #define CIDXTID_CID_MSK (0xF) @@ -662,6 +720,9 @@ struct wmi_vring_cfg { struct wmi_sw_ring_cfg tx_sw_ring; /* 0-23 vrings */ u8 ringid; + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 encap_trans_type; /* 802.3 DS cfg */ @@ -671,6 +732,11 @@ struct wmi_vring_cfg { u8 to_resolution; u8 agg_max_wsize; struct wmi_vring_cfg_schd schd_params; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; enum wmi_vring_cfg_cmd_action { @@ -868,23 +934,42 @@ struct wmi_cfg_rx_chain_cmd { /* WMI_RCP_ADDBA_RESP_CMDID */ struct wmi_rcp_addba_resp_cmd { + /* Used for cid less than 8. 
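
The CIDXTID_EXTENDED_CID_TID additions above keep the old packed cidxtid byte (CID in the low nibble, TID in the high nibble) while letting CIDs of 8 and up escape into explicit cid/tid fields behind the 0xFF marker. A small sketch of reading both encodings, with an invented message layout:

#include <stdint.h>
#include <stdio.h>

#define CIDXTID_EXTENDED_CID_TID 0xFF
#define CIDXTID_CID_MSK 0x0F
#define CIDXTID_TID_POS 4

struct addba_like {
        uint8_t cidxtid;        /* packed cid|tid, or 0xFF = fields below */
        uint8_t cid;
        uint8_t tid;
};

static void parse_cidxtid(const struct addba_like *m, uint8_t *cid,
                          uint8_t *tid)
{
        if (m->cidxtid == CIDXTID_EXTENDED_CID_TID) {
                /* cid >= 8 no longer fits in a nibble: read the new
                 * explicit fields instead. */
                *cid = m->cid;
                *tid = m->tid;
        } else {
                *cid = m->cidxtid & CIDXTID_CID_MSK;
                *tid = m->cidxtid >> CIDXTID_TID_POS;
        }
}

int main(void)
{
        struct addba_like legacy = { .cidxtid = (2 << CIDXTID_TID_POS) | 5 };
        struct addba_like ext = { .cidxtid = CIDXTID_EXTENDED_CID_TID,
                                  .cid = 12, .tid = 2 };
        uint8_t cid, tid;

        parse_cidxtid(&legacy, &cid, &tid);
        printf("legacy: cid %u tid %u\n", cid, tid);
        parse_cidxtid(&ext, &cid, &tid);
        printf("extended: cid %u tid %u\n", cid, tid);
        return 0;
}
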
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; __le16 status_code; /* ieee80211_ba_parameterset field to send */ __le16 ba_param_set; __le16 ba_timeout; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_RCP_DELBA_CMDID */ struct wmi_rcp_delba_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_RCP_ADDBA_REQ_CMDID */ struct wmi_rcp_addba_req_cmd { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset field as it received */ @@ -892,6 +977,11 @@ struct wmi_rcp_addba_req_cmd { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_SET_MAC_ADDRESS_CMDID */ @@ -902,15 +992,20 @@ struct wmi_set_mac_address_cmd { /* WMI_ECHO_CMDID * Check FW is alive - * WMI_DEEP_ECHO_CMDID - * Check FW and ucode are alive * Returned event: WMI_ECHO_RSP_EVENTID - * same event for both commands */ struct wmi_echo_cmd { __le32 value; } __packed; +/* WMI_DEEP_ECHO_CMDID + * Check FW and ucode are alive + * Returned event: WMI_ECHO_RSP_EVENTID + */ +struct wmi_deep_echo_cmd { + __le32 value; +} __packed; + /* WMI_RF_PWR_ON_DELAY_CMDID * set FW time parameters used through RF resetting * RF reset consists of bringing its power down for a period of time, then @@ -928,7 +1023,7 @@ struct wmi_rf_pwr_on_delay_cmd { __le16 up_delay_usec; } __packed; -/* \WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID +/* WMI_SET_HIGH_POWER_TABLE_PARAMS_CMDID * This API controls the Tx and Rx gain over temperature. * It controls the Tx D-type, Rx D-type and Rx E-type amplifiers. * It also controls the Tx gain index, by controlling the Rx to Tx gain index @@ -942,25 +1037,46 @@ struct wmi_set_high_power_table_params_cmd { u8 tx_dtype_temp[WMI_RF_DTYPE_LENGTH]; u8 reserved0; /* Tx D-type values to be used for each temperature range */ - __le32 tx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 tx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; + /* Temperature range for Tx E-type parameters */ + u8 tx_etype_temp[WMI_RF_ETYPE_LENGTH]; + u8 reserved1; + /* Tx E-type values to be used for each temperature range. + * The last 4 values of any range are the first 4 values of the next + * range and so on + */ + __le32 tx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for Rx D-type parameters */ u8 rx_dtype_temp[WMI_RF_DTYPE_LENGTH]; - u8 reserved1; + u8 reserved2; /* Rx D-type values to be used for each temperature range */ - __le32 rx_dtype_conf[WMI_RF_DTYPE_LENGTH + 1]; + __le32 rx_dtype_conf[WMI_RF_DTYPE_CONF_LENGTH]; /* Temperature range for Rx E-type parameters */ u8 rx_etype_temp[WMI_RF_ETYPE_LENGTH]; - u8 reserved2; + u8 reserved3; /* Rx E-type values to be used for each temperature range. 
* The last 4 values of any range are the first 4 values of the next * range and so on */ - __le32 rx_etype_conf[WMI_RF_ETYPE_VAL_PER_RANGE + WMI_RF_ETYPE_LENGTH]; + __le32 rx_etype_conf[WMI_RF_ETYPE_CONF_LENGTH]; /* Temperature range for rx_2_tx_offs parameters */ u8 rx_2_tx_temp[WMI_RF_RX2TX_LENGTH]; - u8 reserved3; + u8 reserved4; /* Rx to Tx gain index offset */ - s8 rx_2_tx_offs[WMI_RF_RX2TX_LENGTH + 1]; + s8 rx_2_tx_offs[WMI_RF_RX2TX_CONF_LENGTH]; +} __packed; + +/* WMI_FIXED_SCHEDULING_UL_CONFIG_CMDID + * This API sets rd parameter per mcs. + * Relevant only in Fixed Scheduling mode. + * Returned event: WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID + */ +struct wmi_fixed_scheduling_ul_config_cmd { + /* Use mcs -1 to set for every mcs */ + s8 mcs; + /* Number of frames with rd bit set in a single virtual slot */ + u8 rd_count_per_slot; + u8 reserved[2]; } __packed; /* CMD: WMI_RF_XPM_READ_CMDID */ @@ -1267,6 +1383,93 @@ struct wmi_set_long_range_config_complete_event { u8 reserved[3]; } __packed; +/* payload max size is 236 bytes: max event buffer size (256) - WMI headers + * (16) - prev struct field size (4) + */ +#define WMI_MAX_IOCTL_PAYLOAD_SIZE (236) +#define WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE (236) +#define WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE (236) + +enum wmi_internal_fw_ioctl_code { + WMI_INTERNAL_FW_CODE_NONE = 0x0, + WMI_INTERNAL_FW_CODE_QCOM = 0x1, +}; + +/* WMI_INTERNAL_FW_IOCTL_CMDID */ +struct wmi_internal_fw_ioctl_cmd { + /* enum wmi_internal_fw_ioctl_code */ + __le16 code; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_IOCTL_EVENTID */ +struct wmi_internal_fw_ioctl_event { + /* wmi_fw_status */ + u8 status; + u8 reserved; + __le16 length; + /* payload max size is WMI_MAX_IOCTL_REPLY_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_INTERNAL_FW_EVENT_EVENTID */ +struct wmi_internal_fw_event_event { + __le16 id; + __le16 length; + /* payload max size is WMI_MAX_INTERNAL_EVENT_PAYLOAD_SIZE + * Must be the last member of the struct + */ + __le32 payload[0]; +} __packed; + +/* WMI_BF_CONTROL_CMDID */ +struct wmi_bf_control_cmd { + /* wmi_bf_triggers */ + __le32 triggers; + u8 cid; + /* DISABLED = 0, ENABLED = 1 , DRY_RUN = 2 */ + u8 txss_mode; + /* DISABLED = 0, ENABLED = 1, DRY_RUN = 2 */ + u8 brp_mode; + /* Max cts threshold (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_thr; + /* Max cts threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_CTS_FAILURE_IN_TXOP) + */ + u8 bf_trigger_max_cts_failure_dense_thr; + /* Max b-ack threshold (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_thr; + /* Max b-ack threshold in dense (correspond to + * WMI_BF_TRIGGER_MAX_BACK_FAILURE) + */ + u8 bf_trigger_max_back_failure_dense_thr; + u8 reserved0; + /* Wrong sectors threshold */ + __le32 wrong_sector_bis_thr; + /* BOOL to enable/disable long term trigger */ + u8 long_term_enable; + /* 1 = Update long term thresholds from the long_term_mbps_th_tbl and + * long_term_trig_timeout_per_mcs arrays, 0 = Ignore + */ + u8 long_term_update_thr; + /* Long term throughput threshold [Mbps] */ + u8 long_term_mbps_th_tbl[WMI_NUM_MCS]; + u8 reserved1; + /* Long term timeout threshold table [msec] */ + __le16 long_term_trig_timeout_per_mcs[WMI_NUM_MCS]; + u8 reserved2[2]; +} __packed; + /* WMI Events * List of Events (target to host) */ @@ 
-1325,6 +1528,7 @@ enum wmi_event_id { WMI_SET_SILENT_RSSI_TABLE_DONE_EVENTID = 0x185C, WMI_RF_PWR_ON_DELAY_RSP_EVENTID = 0x185D, WMI_SET_HIGH_POWER_TABLE_PARAMS_EVENTID = 0x185E, + WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID = 0x185F, /* Performance monitoring events */ WMI_DATA_PORT_OPEN_EVENTID = 0x1860, WMI_WBE_LINK_DOWN_EVENTID = 0x1861, @@ -1334,6 +1538,7 @@ enum wmi_event_id { WMI_VRING_EN_EVENTID = 0x1865, WMI_GET_RF_STATUS_EVENTID = 0x1866, WMI_GET_BASEBAND_TYPE_EVENTID = 0x1867, + WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID = 0x1868, WMI_UNIT_TEST_EVENTID = 0x1900, WMI_FLASH_READ_DONE_EVENTID = 0x1902, WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, @@ -1363,6 +1568,7 @@ enum wmi_event_id { WMI_GET_THERMAL_THROTTLING_CFG_EVENTID = 0x1941, /* return the Power Save profile */ WMI_PS_DEV_PROFILE_CFG_READ_EVENTID = 0x1942, + WMI_TSF_SYNC_STATUS_EVENTID = 0x1973, WMI_TOF_SESSION_END_EVENTID = 0x1991, WMI_TOF_GET_CAPABILITIES_EVENTID = 0x1992, WMI_TOF_SET_LCR_EVENTID = 0x1993, @@ -1380,17 +1586,24 @@ enum wmi_event_id { WMI_PRIO_TX_SECTORS_ORDER_EVENTID = 0x19A5, WMI_PRIO_TX_SECTORS_NUMBER_EVENTID = 0x19A6, WMI_PRIO_TX_SECTORS_SET_DEFAULT_CFG_EVENTID = 0x19A7, + WMI_BF_CONTROL_EVENTID = 0x19AA, WMI_SCHEDULING_SCHEME_EVENTID = 0x1A01, WMI_FIXED_SCHEDULING_CONFIG_COMPLETE_EVENTID = 0x1A02, WMI_ENABLE_FIXED_SCHEDULING_COMPLETE_EVENTID = 0x1A03, WMI_SET_MULTI_DIRECTED_OMNIS_CONFIG_EVENTID = 0x1A04, WMI_SET_LONG_RANGE_CONFIG_COMPLETE_EVENTID = 0x1A05, + WMI_GET_ASSOC_LIST_RES_EVENTID = 0x1A06, + WMI_GET_CCA_INDICATIONS_EVENTID = 0x1A07, + WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID = 0x1A08, + WMI_INTERNAL_FW_EVENT_EVENTID = 0x1A0A, + WMI_INTERNAL_FW_IOCTL_EVENTID = 0x1A0B, WMI_SET_CHANNEL_EVENTID = 0x9000, WMI_ASSOC_REQ_EVENTID = 0x9001, WMI_EAPOL_RX_EVENTID = 0x9002, WMI_MAC_ADDR_RESP_EVENTID = 0x9003, WMI_FW_VER_EVENTID = 0x9004, WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005, + WMI_INTERNAL_FW_SET_CHANNEL = 0x9006, WMI_COMMAND_NOT_SUPPORTED_EVENTID = 0xFFFF, }; @@ -1462,12 +1675,16 @@ enum rf_type { RF_UNKNOWN = 0x00, RF_MARLON = 0x01, RF_SPARROW = 0x02, + RF_TALYNA1 = 0x03, + RF_TALYNA2 = 0x04, }; /* WMI_GET_RF_STATUS_EVENTID */ enum board_file_rf_type { BF_RF_MARLON = 0x00, BF_RF_SPARROW = 0x01, + BF_RF_TALYNA1 = 0x02, + BF_RF_TALYNA2 = 0x03, }; /* WMI_GET_RF_STATUS_EVENTID */ @@ -1507,6 +1724,7 @@ enum baseband_type { BASEBAND_SPARROW_M_C0 = 0x06, BASEBAND_SPARROW_M_D0 = 0x07, BASEBAND_TALYN_M_A0 = 0x08, + BASEBAND_TALYN_M_B0 = 0x09, }; /* WMI_GET_BASEBAND_TYPE_EVENTID */ @@ -1551,7 +1769,11 @@ struct wmi_ready_event { u8 numof_additional_mids; /* rfc read calibration result. 5..15 */ u8 rfc_read_calib_result; - u8 reserved[3]; + /* Max associated STAs supported by FW in AP mode (default 0 means 8 + * STA) + */ + u8 max_assoc_sta; + u8 reserved[2]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ @@ -1666,13 +1888,13 @@ enum wmi_pno_result { }; struct wmi_start_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; struct wmi_stop_sched_scan_event { - /* pno_result */ + /* wmi_pno_result */ u8 result; u8 reserved[3]; } __packed; @@ -1739,9 +1961,17 @@ struct wmi_ba_status_event { /* WMI_DELBA_EVENTID */ struct wmi_delba_event { + /* Used for cid less than 8. 
For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 from_initiator; __le16 reason; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_VRING_CFG_DONE_EVENTID */ @@ -1754,13 +1984,24 @@ struct wmi_vring_cfg_done_event { /* WMI_RCP_ADDBA_RESP_SENT_EVENTID */ struct wmi_rcp_addba_resp_sent_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 reserved; __le16 status; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved2[2]; } __packed; /* WMI_RCP_ADDBA_REQ_EVENTID */ struct wmi_rcp_addba_req_event { + /* Used for cid less than 8. For higher cid set + * CIDXTID_EXTENDED_CID_TID here and use cid and tid members instead + */ u8 cidxtid; u8 dialog_token; /* ieee80211_ba_parameterset as it received */ @@ -1768,6 +2009,11 @@ struct wmi_rcp_addba_req_event { __le16 ba_timeout; /* ieee80211_ba_seqstrl field as it received */ __le16 ba_seq_ctrl; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 cid; + /* Used when cidxtid = CIDXTID_EXTENDED_CID_TID */ + u8 tid; + u8 reserved[2]; } __packed; /* WMI_CFG_RX_CHAIN_DONE_EVENTID */ @@ -1942,6 +2188,13 @@ struct wmi_set_high_power_table_params_event { u8 reserved[3]; } __packed; +/* WMI_FIXED_SCHEDULING_UL_CONFIG_EVENTID */ +struct wmi_fixed_scheduling_ul_config_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + /* WMI_TEMP_SENSE_DONE_EVENTID * * Measure MAC and radio temperatures @@ -2290,6 +2543,8 @@ struct wmi_link_maintain_cfg { __le32 bad_beacons_num_threshold; /* SNR limit for bad_beacons_detector */ __le32 bad_beacons_snr_threshold_db; + /* timeout for disassoc response frame in uSec */ + __le32 disconnect_timeout; } __packed; /* WMI_LINK_MAINTAIN_CFG_WRITE_CMDID */ @@ -2519,6 +2774,7 @@ enum wmi_tof_session_end_status { WMI_TOF_SESSION_END_FAIL = 0x01, WMI_TOF_SESSION_END_PARAMS_ERROR = 0x02, WMI_TOF_SESSION_END_ABORTED = 0x03, + WMI_TOF_SESSION_END_BUSY = 0x04, }; /* WMI_TOF_SESSION_END_EVENTID */ @@ -2925,7 +3181,40 @@ struct wmi_set_silent_rssi_table_done_event { __le32 table; } __packed; -/* \WMI_COMMAND_NOT_SUPPORTED_EVENTID */ +/* WMI_VRING_SWITCH_TIMING_CONFIG_EVENTID */ +struct wmi_vring_switch_timing_config_event { + /* enum wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_ASSOC_LIST_RES_EVENTID */ +struct wmi_assoc_sta_info { + u8 mac[WMI_MAC_LEN]; + u8 omni_index_address; + u8 reserved; +} __packed; + +#define WMI_GET_ASSOC_LIST_SIZE (8) + +/* WMI_GET_ASSOC_LIST_RES_EVENTID + * Returns up to MAX_ASSOC_STA_LIST_SIZE associated STAs + */ +struct wmi_get_assoc_list_res_event { + struct wmi_assoc_sta_info assoc_sta_list[WMI_GET_ASSOC_LIST_SIZE]; + /* STA count */ + u8 count; + u8 reserved[3]; +} __packed; + +/* WMI_BF_CONTROL_EVENTID */ +struct wmi_bf_control_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_COMMAND_NOT_SUPPORTED_EVENTID */ struct wmi_command_not_supported_event { /* device id */ u8 mid; @@ -2936,4 +3225,62 @@ struct wmi_command_not_supported_event { __le16 reserved1; } __packed; +/* WMI_TSF_SYNC_CMDID */ +struct wmi_tsf_sync_cmd { + /* The time interval to send announce frame in one BI */ + u8 interval_ms; + /* The mcs to send announce frame */ + u8 mcs; + u8 reserved[6]; +} __packed; + +/* 
WMI_TSF_SYNC_STATUS_EVENTID */ +enum wmi_tsf_sync_status { + WMI_TSF_SYNC_SUCCESS = 0x00, + WMI_TSF_SYNC_FAILED = 0x01, + WMI_TSF_SYNC_REJECTED = 0x02, +}; + +/* WMI_TSF_SYNC_STATUS_EVENTID */ +struct wmi_tsf_sync_status_event { + /* enum wmi_tsf_sync_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_GET_CCA_INDICATIONS_EVENTID */ +struct wmi_get_cca_indications_event { + /* wmi_fw_status */ + u8 status; + /* CCA-Energy Detect in percentage over last BI (0..100) */ + u8 cca_ed_percent; + /* Averaged CCA-Energy Detect in percent over number of BIs (0..100) */ + u8 cca_ed_avg_percent; + /* NAV percent over last BI (0..100) */ + u8 nav_percent; + /* Averaged NAV percent over number of BIs (0..100) */ + u8 nav_avg_percent; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_CMDID */ +struct wmi_set_cca_indications_bi_avg_num_cmd { + /* set the number of bis to average cca_ed (0..255) */ + u8 bi_number; + u8 reserved[3]; +} __packed; + +/* WMI_SET_CCA_INDICATIONS_BI_AVG_NUM_EVENTID */ +struct wmi_set_cca_indications_bi_avg_num_event { + /* wmi_fw_status */ + u8 status; + u8 reserved[3]; +} __packed; + +/* WMI_INTERNAL_FW_SET_CHANNEL */ +struct wmi_internal_fw_set_channel_event { + u8 channel_num; + u8 reserved[3]; +} __packed; + #endif /* __WILOCITY_WMI_H__ */ diff --git a/drivers/net/wireless/atmel/atmel_pci.c b/drivers/net/wireless/atmel/atmel_pci.c index bcf1f274a251..30df58a41a83 100644 --- a/drivers/net/wireless/atmel/atmel_pci.c +++ b/drivers/net/wireless/atmel/atmel_pci.c @@ -61,8 +61,10 @@ static int atmel_pci_probe(struct pci_dev *pdev, dev = init_atmel_card(pdev->irq, pdev->resource[1].start, ATMEL_FW_TYPE_506, &pdev->dev, NULL, NULL); - if (!dev) + if (!dev) { + pci_disable_device(pdev); return -ENODEV; + } pci_set_drvdata(pdev, dev); return 0; diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c index 6837064908be..6b0e1ec346cb 100644 --- a/drivers/net/wireless/broadcom/b43/dma.c +++ b/drivers/net/wireless/broadcom/b43/dma.c @@ -1484,7 +1484,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, int slot, firstused; bool frame_succeed; int skip; - static u8 err_out1, err_out2; + static u8 err_out1; ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) @@ -1518,13 +1518,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, } } else { /* More than a single header/data pair were missed. - * Report this error once. + * Report this error, and reset the controller to + * revive operation. */ - if (!err_out2) - b43dbg(dev->wl, - "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", - ring->index, firstused, slot); - err_out2 = 1; + b43dbg(dev->wl, + "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", + ring->index, firstused, slot); + b43_controller_restart(dev, "Out of order TX"); return; } } diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c index cfa617ddb2f1..2f0c64cef65f 100644 --- a/drivers/net/wireless/broadcom/b43legacy/dma.c +++ b/drivers/net/wireless/broadcom/b43legacy/dma.c @@ -1064,7 +1064,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring, meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); /* create a bounce buffer in zone_dma on mapping failure. 
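* The bounce buffer is a fresh GFP_DMA allocation that lands in ZONE_DMA, within the device's DMA addressing limits; the frame payload is copied into it and the descriptor mapping is then retried.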
*/ if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { - bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA); if (!bounce_skb) { ring->current_slot = old_top_slot; ring->used_slots = old_used_slots; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 0b68240ec7b4..d2f788d88668 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -963,6 +963,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), @@ -1164,6 +1165,7 @@ static struct sdio_driver brcmf_sdmmc_driver = { #ifdef CONFIG_PM_SLEEP .pm = &brcmf_sdio_pm_ops, #endif /* CONFIG_PM_SLEEP */ + .coredump = brcmf_dev_coredump, }, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 27e693e93f21..c4965184cdf3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -250,6 +250,8 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); void brcmf_detach(struct device *dev); /* Indication from bus module that dongle should be reset */ void brcmf_dev_reset(struct device *dev); +/* Request from bus module to initiate a coredump */ +void brcmf_dev_coredump(struct device *dev); /* Configure the "global" bus state used by upper layers */ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 89b86251910e..b6122aad639e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1264,7 +1264,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) brcmf_dbg(TRACE, "Enter\n"); if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) { - brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n "); + brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); err = brcmf_fil_cmd_data_set(vif->ifp, BRCMF_C_DISASSOC, NULL, 0); if (err) { @@ -2728,9 +2728,8 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg); - struct ieee80211_channel *notify_channel; struct cfg80211_bss *bss; - struct ieee80211_supported_band *band; + enum nl80211_band band; struct brcmu_chan ch; u16 channel; u32 freq; @@ -2738,7 +2737,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, u16 notify_interval; u8 *notify_ie; size_t notify_ielen; - s32 notify_signal; + struct cfg80211_inform_bss bss_data = {}; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { brcmf_err("Bss info is larger than buffer. 
Discarding\n"); @@ -2753,32 +2752,33 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, channel = bi->ctl_ch; if (channel <= CH_MAX_2G_CHANNEL) - band = wiphy->bands[NL80211_BAND_2GHZ]; + band = NL80211_BAND_2GHZ; else - band = wiphy->bands[NL80211_BAND_5GHZ]; + band = NL80211_BAND_5GHZ; - freq = ieee80211_channel_to_frequency(channel, band->band); - notify_channel = ieee80211_get_channel(wiphy, freq); + freq = ieee80211_channel_to_frequency(channel, band); + bss_data.chan = ieee80211_get_channel(wiphy, freq); + bss_data.scan_width = NL80211_BSS_CHAN_WIDTH_20; + bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); notify_capability = le16_to_cpu(bi->capability); notify_interval = le16_to_cpu(bi->beacon_period); notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); notify_ielen = le32_to_cpu(bi->ie_length); - notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; + bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100; brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID); brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq); brcmf_dbg(CONN, "Capability: %X\n", notify_capability); brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval); - brcmf_dbg(CONN, "Signal: %d\n", notify_signal); + brcmf_dbg(CONN, "Signal: %d\n", bss_data.signal); - bss = cfg80211_inform_bss(wiphy, notify_channel, - CFG80211_BSS_FTYPE_UNKNOWN, - (const u8 *)bi->BSSID, - 0, notify_capability, - notify_interval, notify_ie, - notify_ielen, notify_signal, - GFP_KERNEL); + bss = cfg80211_inform_bss_data(wiphy, &bss_data, + CFG80211_BSS_FTYPE_UNKNOWN, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, GFP_KERNEL); if (!bss) return -ENOMEM; @@ -5498,7 +5498,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, static int generation; u32 event = e->event_code; u32 reason = e->reason; - struct station_info sinfo; + struct station_info *sinfo; brcmf_dbg(CONN, "event %s (%u), reason %d\n", brcmf_fweh_event_name(event), event, reason); @@ -5511,16 +5511,22 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && (reason == BRCMF_E_STATUS_SUCCESS)) { - memset(&sinfo, 0, sizeof(sinfo)); if (!data) { brcmf_err("No IEs present in ASSOC/REASSOC_IND"); return -EINVAL; } - sinfo.assoc_req_ies = data; - sinfo.assoc_req_ies_len = e->datalen; + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + + sinfo->assoc_req_ies = data; + sinfo->assoc_req_ies_len = e->datalen; generation++; - sinfo.generation = generation; - cfg80211_new_sta(ndev, e->addr, &sinfo, GFP_KERNEL); + sinfo->generation = generation; + cfg80211_new_sta(ndev, e->addr, sinfo, GFP_KERNEL); + + kfree(sinfo); } else if ((event == BRCMF_E_DISASSOC_IND) || (event == BRCMF_E_DEAUTH_IND) || (event == BRCMF_E_DEAUTH)) { @@ -6512,6 +6518,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_PS_ON_BY_DEFAULT | + WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 3b829fed8631..927d62b3d41b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -689,6 +689,7 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43525_CHIP_ID: case 
BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: + case BRCM_CC_43664_CHIP_ID: return 0x200000; case CY_CC_4373_CHIP_ID: return 0x160000; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 105b8774fca9..cd3651069d0c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -36,8 +36,6 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); MODULE_LICENSE("Dual BSD/GPL"); -const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 #define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index ef914619e8e1..a34642cb4d2f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -19,8 +19,6 @@ #include <linux/platform_data/brcmfmac.h> #include "fwil_types.h" -extern const u8 ALLFFMAC[ETH_ALEN]; - #define BRCMF_FW_ALTPATH_LEN 256 /* Definitions for the module global and device specific settings are defined diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 8d4511eaa9b9..72954fd6df3b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1180,6 +1180,14 @@ void brcmf_dev_reset(struct device *dev) brcmf_fil_cmd_int_set(drvr->iflist[0], BRCMF_C_TERMINATED, 1); } +void brcmf_dev_coredump(struct device *dev) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + + if (brcmf_debug_create_memdump(bus_if, NULL, 0) < 0) + brcmf_dbg(TRACE, "failed to create coredump\n"); +} + void brcmf_detach(struct device *dev) { s32 i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 504832084eca..489b5dfdf5b9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -40,7 +40,8 @@ int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data, if (!dump) return -ENOMEM; - memcpy(dump, data, len); + if (data && len > 0) + memcpy(dump, data, len); err = brcmf_bus_get_memdump(bus, dump + len, ramsize); if (err) { vfree(dump); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 876731c57bf5..800a423c7bc2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -165,6 +165,41 @@ static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) } } +/** + * brcmf_feat_fwcap_debugfs_read() - expose firmware capabilities to debugfs. + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. 
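+ * + * Return: 0 on success, or a negative error code if reading the firmware "cap" iovar fails.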
+ */ +static int brcmf_feat_fwcap_debugfs_read(struct seq_file *seq, void *data) +{ + struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); + struct brcmf_if *ifp = brcmf_get_ifp(bus_if->drvr, 0); + char caps[MAX_CAPS_BUFFER_SIZE + 1] = { }; + char *tmp; + int err; + + err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + if (err) { + brcmf_err("could not get firmware cap (%d)\n", err); + return err; + } + + /* Put every capability in a new line */ + for (tmp = caps; *tmp; tmp++) { + if (*tmp == ' ') + *tmp = '\n'; + } + + /* Usually there is a space at the end of capabilities string */ + seq_printf(seq, "%s", caps); + /* So make sure we don't print two line breaks */ + if (tmp > caps && *(tmp - 1) != '\n') + seq_printf(seq, "\n"); + + return 0; +} + void brcmf_feat_attach(struct brcmf_pub *drvr) { struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); @@ -233,6 +268,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) void brcmf_feat_debugfs_create(struct brcmf_pub *drvr) { brcmf_debugfs_add_entry(drvr, "features", brcmf_feat_debugfs_read); + brcmf_debugfs_add_entry(drvr, "fwcap", brcmf_feat_fwcap_debugfs_read); } bool brcmf_feat_is_enabled(struct brcmf_if *ifp, enum brcmf_feat_id id) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 94e177d7c9b5..9095b830ae4d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -634,7 +634,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, struct brcmf_fw_request * brcmf_fw_alloc_request(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], + const struct brcmf_firmware_mapping mapping_table[], u32 table_size, struct brcmf_fw_name *fwnames, u32 n_fwnames) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 79a21095c349..2893e56910f0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -80,7 +80,7 @@ struct brcmf_fw_name { struct brcmf_fw_request * brcmf_fw_alloc_request(u32 chip, u32 chiprev, - struct brcmf_firmware_mapping mapping_table[], + const struct brcmf_firmware_mapping mapping_table[], u32 table_size, struct brcmf_fw_name *fwnames, u32 n_fwnames); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index d0b738da2458..d0d8b32af7d0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c @@ -46,6 +46,8 @@ static const u8 brcmf_flowring_prio2fifo[] = { 3 }; +static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + static bool brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index f93ba6be1ef8..692235d25277 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h @@ -27,8 +27,10 @@ #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE 32 #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE 24 -#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 16 -#define 
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 32 +#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7 16 +#define BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 24 +#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 32 +#define BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 40 #define BRCMF_H2D_TXFLOWRING_ITEMSIZE 48 struct msgbuf_buf_addr { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index bcef208a81a5..4b2149b48362 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -2073,6 +2073,13 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, } pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + + /* firmware requires unique mac address for p2pdev interface */ + if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) { + brcmf_err("discovery vif must be different from primary interface\n"); + return ERR_PTR(-EINVAL); + } + brcmf_p2p_generate_bss_mac(p2p, addr); brcmf_p2p_set_firmware(pri_ifp, p2p->dev_addr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 091c191ce259..45928b5b8d97 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -59,7 +59,7 @@ BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie"); BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie"); BRCMF_FW_DEF(4371, "brcmfmac4371-pcie"); -static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), @@ -75,6 +75,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), + BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), }; @@ -104,7 +105,8 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_PCIE2REG_MAILBOXMASK 0x4C #define BRCMF_PCIE_PCIE2REG_CONFIGADDR 0x120 #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124 -#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140 +#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140 +#define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144 #define BRCMF_PCIE2_INTA 0x01 #define BRCMF_PCIE2_INTB 0x02 @@ -134,11 +136,13 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_PCIE_MB_INT_D2H3_DB0 | \ BRCMF_PCIE_MB_INT_D2H3_DB1) +#define BRCMF_PCIE_SHARED_VERSION_7 7 #define BRCMF_PCIE_MIN_SHARED_VERSION 5 -#define BRCMF_PCIE_MAX_SHARED_VERSION 6 +#define BRCMF_PCIE_MAX_SHARED_VERSION BRCMF_PCIE_SHARED_VERSION_7 #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF #define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000 #define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000 +#define BRCMF_PCIE_SHARED_HOSTRDY_DB1 0x10000000 #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000 #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000 @@ -178,6 +182,7 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_D2H_DEV_D3_ACK 0x00000001 #define BRCMF_D2H_DEV_DS_ENTER_REQ 0x00000002 #define BRCMF_D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define BRCMF_D2H_DEV_FWHALT 0x10000000 #define BRCMF_H2D_HOST_D3_INFORM 0x00000001 #define 
BRCMF_H2D_HOST_DS_ACK 0x00000002 @@ -315,6 +320,14 @@ static const u32 brcmf_ring_max_item[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM }; +static const u32 brcmf_ring_itemsize_pre_v7[BRCMF_NROF_COMMON_MSGRINGS] = { + BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, + BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, + BRCMF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, + BRCMF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7, + BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 +}; + static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = { BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, BRCMF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, @@ -705,6 +718,10 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) devinfo->mbdata_completed = true; wake_up(&devinfo->mbdata_resp_wait); } + if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) { + brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n"); + brcmf_dev_coredump(&devinfo->pdev->dev); + } } @@ -781,6 +798,12 @@ static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) BRCMF_PCIE_MB_INT_FN0_1); } +static void brcmf_pcie_hostready(struct brcmf_pciedev_info *devinfo) +{ + if (devinfo->shared.flags & BRCMF_PCIE_SHARED_HOSTRDY_DB1) + brcmf_pcie_write_reg32(devinfo, + BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_1, 1); +} static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg) { @@ -923,7 +946,7 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx) brcmf_dbg(PCIE, "RING !\n"); /* Any arbitrary value will do, lets use 1 */ - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX_0, 1); return 0; } @@ -998,8 +1021,14 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, struct brcmf_pcie_ringbuf *ring; u32 size; u32 addr; + const u32 *ring_itemsize_array; + + if (devinfo->shared.version < BRCMF_PCIE_SHARED_VERSION_7) + ring_itemsize_array = brcmf_ring_itemsize_pre_v7; + else + ring_itemsize_array = brcmf_ring_itemsize; - size = brcmf_ring_max_item[ring_id] * brcmf_ring_itemsize[ring_id]; + size = brcmf_ring_max_item[ring_id] * ring_itemsize_array[ring_id]; dma_buf = brcmf_pcie_init_dmabuffer_for_device(devinfo, size, tcm_ring_phys_addr + BRCMF_RING_MEM_BASE_ADDR_OFFSET, &dma_handle); @@ -1009,7 +1038,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, addr = tcm_ring_phys_addr + BRCMF_RING_MAX_ITEM_OFFSET; brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_max_item[ring_id]); addr = tcm_ring_phys_addr + BRCMF_RING_LEN_ITEMS_OFFSET; - brcmf_pcie_write_tcm16(devinfo, addr, brcmf_ring_itemsize[ring_id]); + brcmf_pcie_write_tcm16(devinfo, addr, ring_itemsize_array[ring_id]); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { @@ -1018,7 +1047,7 @@ brcmf_pcie_alloc_dma_and_ring(struct brcmf_pciedev_info *devinfo, u32 ring_id, return NULL; } brcmf_commonring_config(&ring->commonring, brcmf_ring_max_item[ring_id], - brcmf_ring_itemsize[ring_id], dma_buf); + ring_itemsize_array[ring_id], dma_buf); ring->dma_handle = dma_handle; ring->devinfo = devinfo; brcmf_commonring_register_cb(&ring->commonring, @@ -1727,6 +1756,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret, init_waitqueue_head(&devinfo->mbdata_resp_wait); brcmf_pcie_intr_enable(devinfo); + brcmf_pcie_hostready(devinfo); if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0) return; @@ -1949,6 +1979,7 @@ static int brcmf_pcie_pm_leave_D3(struct device *dev) brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2); brcmf_bus_change_state(bus, 
BRCMF_BUS_UP); brcmf_pcie_intr_enable(devinfo); + brcmf_pcie_hostready(devinfo); return 0; } @@ -2018,6 +2049,7 @@ static struct pci_driver brcmf_pciedrvr = { #ifdef CONFIG_PM .driver.pm = &brcmf_pciedrvr_pm, #endif + .driver.coredump = brcmf_dev_coredump, }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 1037df7297bb..c99a191e8d69 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -619,7 +619,7 @@ BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); BRCMF_FW_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); -static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x0000001F, 43241B0), BRCMF_FW_ENTRY(BRCM_CC_43241_CHIP_ID, 0x00000020, 43241B4), @@ -1072,8 +1072,10 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus) bus->sdcnt.f1regdata += 2; /* dongle indicates the firmware has halted/crashed */ - if (hmb_data & HMB_DATA_FWHALT) + if (hmb_data & HMB_DATA_FWHALT) { brcmf_err("mailbox indicates firmware halted\n"); + brcmf_dev_coredump(&sdiod->func1->dev); + } /* Dongle recomposed rx frames, accept them again */ if (hmb_data & HMB_DATA_NAKHANDLED) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a0873adcc01c..a4308c6e72d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -52,7 +52,7 @@ BRCMF_FW_DEF(43242A, "brcmfmac43242a"); BRCMF_FW_DEF(43569, "brcmfmac43569"); BRCMF_FW_DEF(4373, "brcmfmac4373"); -static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { +static const struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), BRCMF_FW_ENTRY(BRCM_CC_43235_CHIP_ID, 0x00000008, 43236B), BRCMF_FW_ENTRY(BRCM_CC_43236_CHIP_ID, 0x00000008, 43236B), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 93d4cde0eb31..9d830d27b229 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -3388,13 +3388,8 @@ void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode) u8 phybw40; phybw40 = CHSPEC_IS40(pi->radio_chanspec); - if (LCNREV_LT(pi->pubpi.phy_rev, 2)) { - mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); - mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); - } else { - mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); - mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); - } + mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5); + mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9); if (phybw40 == 0) { mod_phy_reg((pi), 0x410, diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 57544a3a3ce4..686f7a85a045 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -57,6 +57,7 @@ #define BRCM_CC_43602_CHIP_ID 43602 #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 +#define BRCM_CC_43664_CHIP_ID 43664 #define BRCM_CC_4371_CHIP_ID 0x4371 #define CY_CC_4373_CHIP_ID 0x4373 diff --git 
a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 236b52423506..7c4f550a1475 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -3732,7 +3732,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"), IPW2100_ORD(ASSOCIATED_AP_PTR, "0 if not associated, else pointer to AP table entry"), IPW2100_ORD(AVAILABLE_AP_CNT, - "AP's decsribed in the AP table"), + "AP's described in the AP table"), IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"), IPW2100_ORD(STAT_AP_ASSNS, "associations"), IPW2100_ORD(STAT_ASSN_FAIL, "association failures"), diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index 193947865efd..ce3e35f6b60f 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h @@ -1009,7 +1009,7 @@ typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated // AP table entry. set to 0 if not associated - IPW_ORD_AVAILABLE_AP_CNT, // # of AP's decsribed in the AP table + IPW_ORD_AVAILABLE_AP_CNT, // # of AP's described in the AP table IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs IPW_ORD_STAT_AP_ASSNS, // # of associations IPW_ORD_STAT_ASSN_FAIL, // # of association failures diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 87a5e414c2f7..f26beeb6c5ff 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7557,8 +7557,7 @@ static int ipw_associate(void *data) } if (priv->status & STATUS_DISASSOCIATING) { - IPW_DEBUG_ASSOC("Not attempting association (in " - "disassociating)\n "); + IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n"); schedule_work(&priv->associate); return 0; } @@ -12012,7 +12011,7 @@ MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)" #ifdef CONFIG_IPW2200_QOS module_param(qos_enable, int, 0444); -MODULE_PARM_DESC(qos_enable, "enable all QoS functionalitis"); +MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities"); module_param(qos_burst_enable, int, 0444); MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode"); diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index e6205eae51fd..4d08d78c6b71 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -13,7 +13,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o -iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index b2573b1d1506..591687984962 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -27,7 +28,6 @@ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" -#include "iwl-csr.h" #include "iwl-agn-hw.h" /* Highest firmware API version supported */ @@ -91,7 +91,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = { .base_params = &iwl1000_base_params, \ .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl1000_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", @@ -117,7 +118,8 @@ const struct iwl_cfg iwl1000_bg_cfg = { .eeprom_params = &iwl1000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl100_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 100 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index 1b32ad413b9e..a63ca8820568 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -115,7 +116,8 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = { .base_params = &iwl2000_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl2000_2bgn_cfg = { @@ -142,7 +144,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = { .base_params = &iwl2030_base_params, \ .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl2030_2bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", @@ -163,7 +166,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = { .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl105_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", @@ -190,7 +194,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = { .eeprom_params = &iwl20x0_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ .rx_with_siso_diversity = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl135_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index dffd9df782b0..d4ba66aecdc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ 
b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -54,7 +54,6 @@ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL_22000_UCODE_API_MAX 38 @@ -115,8 +114,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_22000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl_22000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ @@ -137,13 +134,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .gen2 = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .tx_cmd_queue_size = 32, \ .min_umac_error_event_table = 0x400000 const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -154,6 +151,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -165,6 +163,7 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_JF_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -175,6 +174,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -185,6 +185,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_F0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -195,6 +196,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_JF_B0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, @@ -205,6 +207,7 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A0_FW_PRE, IWL_DEVICE_22000, + .csr = &iwl_csr_v1, .ht_params = &iwl_22000_ht_params, .nvm_ver = IWL_22000_NVM_VERSION, .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index 4aa8f0a05c8a..a224f1be1ec2 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -28,7 +29,6 @@ #include <linux/stringify.h> #include "iwl-config.h" #include "iwl-agn-hw.h" -#include "iwl-csr.h" /* Highest firmware API version supported */ #define IWL5000_UCODE_API_MAX 5 @@ -89,7 +89,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = { .base_params = &iwl5000_base_params, \ .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl5300_agn_cfg = { .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", @@ -153,7 +154,8 @@ const struct iwl_cfg iwl5350_agn_cfg = { .eeprom_params = &iwl5000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl5150_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 39335b7b0c16..dbcec7ce7863 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -135,7 +136,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6005_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN", @@ -189,7 +191,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6030_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN", @@ -225,7 +228,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .base_params = &iwl6000_g2_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_RF_STATE, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6035_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", @@ -280,7 +284,8 @@ const struct iwl_cfg iwl130_bg_cfg = { .base_params = &iwl6000_base_params, \ .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6000i_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", @@ -313,7 +318,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = { .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ 
.internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6050_2agn_cfg = { .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", @@ -339,7 +345,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = { .eeprom_params = &iwl6000_eeprom_params, \ .led_mode = IWL_LED_BLINK, \ .internal_wimax_coex = true, \ - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl6150_bgn_cfg = { .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN", @@ -366,6 +373,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = { .eeprom_params = &iwl6000_eeprom_params, .ht_params = &iwl6000_ht_params, .led_mode = IWL_LED_BLINK, + .csr = &iwl_csr_v1, }; MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index ce741beec1fc..69bfa827e82a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -7,7 +7,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +35,8 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -68,7 +70,6 @@ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 @@ -160,14 +161,13 @@ static const struct iwl_ht_params iwl7000_ht_params = { #define IWL_DEVICE_7000_COMMON \ .device_family = IWL_DEVICE_FAMILY_7000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ .non_shared_ant = ANT_A, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ - .dccm_offset = IWL7000_DCCM_OFFSET + .dccm_offset = IWL7000_DCCM_OFFSET, \ + .csr = &iwl_csr_v1 #define IWL_DEVICE_7000 \ IWL_DEVICE_7000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 3f4d9bac9f73..7262e973e0d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -7,7 +7,8 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +35,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -67,7 +69,6 @@ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" -#include "iwl-agn-hw.h" /* Highest firmware API version supported */ #define IWL8000_UCODE_API_MAX 36 @@ -140,8 +141,6 @@ static const struct iwl_tt_params iwl8000_tt_params = { #define IWL_DEVICE_8000_COMMON \ .device_family = IWL_DEVICE_FAMILY_8000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ @@ -158,7 +157,8 @@ static const struct iwl_tt_params iwl8000_tt_params = { .apmg_not_supported = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000 + .min_umac_error_event_table = 0x800000, \ + .csr = &iwl_csr_v1 #define IWL_DEVICE_8000 \ IWL_DEVICE_8000_COMMON, \ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index e1c869a1f8cc..e20c30b29c03 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -54,7 +54,6 @@ #include <linux/module.h> #include <linux/stringify.h> #include "iwl-config.h" -#include "iwl-agn-hw.h" #include "fw/file.h" /* Highest firmware API version supported */ @@ -135,12 +134,10 @@ static const struct iwl_tt_params iwl9000_tt_params = { .ucode_api_max = IWL9000_UCODE_API_MAX, \ .ucode_api_min = IWL9000_UCODE_API_MIN, \ .device_family = IWL_DEVICE_FAMILY_9000, \ - .max_inst_size = IWL60_RTC_INST_SIZE, \ - .max_data_size = IWL60_RTC_DATA_SIZE, \ .base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \ - .non_shared_ant = ANT_A, \ + .non_shared_ant = ANT_B, \ .dccm_offset = IWL9000_DCCM_OFFSET, \ .dccm_len = IWL9000_DCCM_LEN, \ .dccm2_offset = IWL9000_DCCM2_OFFSET, \ @@ -156,7 +153,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .rf_id = true, \ .nvm_type = IWL_NVM_EXT, \ .dbgc_supported = true, \ - .min_umac_error_event_table = 0x800000 + .min_umac_error_event_table = 0x800000, \ + .csr = &iwl_csr_v1 const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index e68254e12764..030482b357a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1200,16 +1200,16 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) return -EINVAL; } - if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable && - !data->sku_cap_band_52GHz_enable) { + if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable && + !data->sku_cap_band_52ghz_enable) { IWL_ERR(priv, "Invalid device sku\n"); return -EINVAL; } IWL_DEBUG_INFO(priv, "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n", - data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled", - data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled", + data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled", data->sku_cap_11n_enable ? 
"" : "NOT", "enabled"); priv->hw_params.tx_chains_num = diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index a57c7223df0f..5f6e855006dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -88,11 +88,6 @@ enum iwl_data_path_subcmd_ids { TLC_MNG_CONFIG_CMD = 0xF, /** - * @TLC_MNG_NOTIF_REQ_CMD: &struct iwl_tlc_notif_req_config_cmd - */ - TLC_MNG_NOTIF_REQ_CMD = 0x10, - - /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ TLC_MNG_UPDATE_NOTIF = 0xF7, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 37c57bcbfb4a..8d6dc9189985 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -190,22 +190,36 @@ struct iwl_nvm_get_info_general { } __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ /** + * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku + * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled + * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled + * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled + * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled + * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled + * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled + * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled + * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled + * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled + */ +enum iwl_nvm_mac_sku_flags { + NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED = BIT(0), + NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), + NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), + NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), + NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), + NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), + NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), + NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED = BIT(14), + NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED = BIT(15), +}; + +/** * struct iwl_nvm_get_info_sku - mac information - * @enable_24g: band 2.4G enabled - * @enable_5g: band 5G enabled - * @enable_11n: 11n enabled - * @enable_11ac: 11ac enabled - * @mimo_disable: MIMO enabled - * @ext_crypto: Extended crypto enabled + * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags */ struct iwl_nvm_get_info_sku { - __le32 enable_24g; - __le32 enable_5g; - __le32 enable_11n; - __le32 enable_11ac; - __le32 mimo_disable; - __le32 ext_crypto; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ + __le32 mac_sku_flags; +} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */ /** * struct iwl_nvm_get_info_phy - phy information @@ -243,7 +257,7 @@ struct iwl_nvm_get_info_rsp { struct iwl_nvm_get_info_sku mac_sku; struct iwl_nvm_get_info_phy phy_sku; struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */ /** * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index e49a6f7be613..21e13a315421 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -64,62 +66,38 @@ /** * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags - * @IWL_TLC_MNG_CFG_FLAGS_CCK_MSK: CCK support - * @IWL_TLC_MNG_CFG_FLAGS_DD_MSK: enable DD * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC - * @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER - * @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM */ enum iwl_tlc_mng_cfg_flags { - IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0), - IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1), - IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2), - IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(3), - IWL_TLC_MNG_CFG_FLAGS_BF_MSK = BIT(4), - IWL_TLC_MNG_CFG_FLAGS_DCM_MSK = BIT(5), + IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(0), + IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK = BIT(1), }; /** * enum iwl_tlc_mng_cfg_cw - channel width options - * @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel - * @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value + * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel + * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value */ enum iwl_tlc_mng_cfg_cw { - IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, - IWL_TLC_MNG_MAX_CH_WIDTH_LAST = IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_20MHZ, + IWL_TLC_MNG_CH_WIDTH_40MHZ, + IWL_TLC_MNG_CH_WIDTH_80MHZ, + IWL_TLC_MNG_CH_WIDTH_160MHZ, + IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ, }; /** * enum iwl_tlc_mng_cfg_chains - possible chains * @IWL_TLC_MNG_CHAIN_A_MSK: chain A * @IWL_TLC_MNG_CHAIN_B_MSK: chain B - * @IWL_TLC_MNG_CHAIN_C_MSK: chain C */ enum iwl_tlc_mng_cfg_chains { IWL_TLC_MNG_CHAIN_A_MSK = BIT(0), IWL_TLC_MNG_CHAIN_B_MSK = BIT(1), - IWL_TLC_MNG_CHAIN_C_MSK = BIT(2), -}; - -/** - * enum iwl_tlc_mng_cfg_gi - guard interval options - * @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ - * @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ - * @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ - * @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ - */ -enum iwl_tlc_mng_cfg_gi { - IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0), - IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1), - IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2), - IWL_TLC_MNG_SGI_160MHZ_MSK = BIT(3), }; /** @@ -145,25 +123,7 @@ enum iwl_tlc_mng_cfg_mode { }; /** - * enum iwl_tlc_mng_vht_he_types - VHT HE types - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based - * @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types - */ -enum iwl_tlc_mng_vht_he_types { - IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT, - 
IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, - IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM = - IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED, - -}; - -/** - * enum iwl_tlc_mng_ht_rates - HT/VHT rates + * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0 * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1 * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2 @@ -174,6 +134,8 @@ enum iwl_tlc_mng_vht_he_types { * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7 * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8 * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9 + * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10 + * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11 * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT */ enum iwl_tlc_mng_ht_rates { @@ -187,81 +149,73 @@ enum iwl_tlc_mng_ht_rates { IWL_TLC_MNG_HT_RATE_MCS7, IWL_TLC_MNG_HT_RATE_MCS8, IWL_TLC_MNG_HT_RATE_MCS9, - IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS9, + IWL_TLC_MNG_HT_RATE_MCS10, + IWL_TLC_MNG_HT_RATE_MCS11, + IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11, }; /* Maximum supported tx antennas number */ -#define MAX_RS_ANT_NUM 3 +#define MAX_NSS 2 /** * struct tlc_config_cmd - TLC configuration * @sta_id: station id * @reserved1: reserved - * @max_supp_ch_width: channel width - * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags - * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains - * @max_supp_ss: valid values are 0-3, 0 - spatial streams are not supported - * @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types - * @non_ht_supp_rates: bitmap of supported legacy rates - * @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9 + * @max_ch_width: max supported channel width from @enum iwl_tlc_mng_cfg_cw * @mode: &enum iwl_tlc_mng_cfg_mode - * @reserved2: reserved - * @he_supp_rates: bitmap of supported HE rates + * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains + * @amsdu: TX amsdu is supported + * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags + * @non_ht_rates: bitmap of supported legacy rates + * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width> + * pair (0 - 80mhz width and below, 1 - 160mhz). 
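The reworked rate tables above replace per-antenna rate maps with one 16-bit MCS bitmap per <nss, channel-width> pair, and extend the MCS range to 11 for HE. A minimal user-space sketch of filling such a table follows; the helper, the peer capabilities and the packing policy are illustrative assumptions, not driver code:

#include <stdint.h>
#include <stdio.h>

#define MAX_NSS 2

/* bit N set => MCS N supported, as in enum iwl_tlc_mng_ht_rates */
static uint16_t mcs_bitmap(int highest_mcs)
{
        uint16_t map = 0;
        int mcs;

        for (mcs = 0; mcs <= highest_mcs; mcs++)
                map |= 1u << mcs;
        return map;
}

int main(void)
{
        /* hypothetical 2x2 peer: MCS0-9 up to 80MHz, MCS0-7 at 160MHz */
        uint16_t ht_rates[MAX_NSS][2];
        int nss;

        for (nss = 0; nss < MAX_NSS; nss++) {
                ht_rates[nss][0] = mcs_bitmap(9);       /* index 0: <=80MHz */
                ht_rates[nss][1] = mcs_bitmap(7);       /* index 1: 160MHz */
        }

        for (nss = 0; nss < MAX_NSS; nss++)
                printf("nss %d: <=80MHz 0x%04x, 160MHz 0x%04x\n",
                       nss + 1, ht_rates[nss][0], ht_rates[nss][1]);
        return 0;
}

Indexing by <nss, width> rather than by antenna presumably matches how the newer firmware models rates, which would explain MAX_RS_ANT_NUM giving way to MAX_NSS here.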
+ * @max_mpdu_len: max MPDU length, in bytes * @sgi_ch_width_supp: bitmap of SGI support per channel width - * @he_gi_support: 11ax HE guard interval - * @max_ampdu_cnt: max AMPDU size (frames count) + * use BIT(@enum iwl_tlc_mng_cfg_cw) + * @reserved2: reserved */ struct iwl_tlc_config_cmd { u8 sta_id; u8 reserved1[3]; - u8 max_supp_ch_width; + u8 max_ch_width; + u8 mode; u8 chains; - u8 max_supp_ss; - u8 valid_vht_he_types; + u8 amsdu; __le16 flags; - __le16 non_ht_supp_rates; - __le16 ht_supp_rates[MAX_RS_ANT_NUM]; - u8 mode; - u8 reserved2; - __le16 he_supp_rates; + __le16 non_ht_rates; + __le16 ht_rates[MAX_NSS][2]; + __le16 max_mpdu_len; u8 sgi_ch_width_supp; - u8 he_gi_support; - __le32 max_ampdu_cnt; -} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_1 */ - -#define IWL_TLC_NOTIF_INIT_RATE_POS 0 -#define IWL_TLC_NOTIF_INIT_RATE_MSK BIT(IWL_TLC_NOTIF_INIT_RATE_POS) -#define IWL_TLC_NOTIF_REQ_INTERVAL (500) + u8 reserved2[1]; +} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */ /** - * struct iwl_tlc_notif_req_config_cmd - request notif on specific changes - * @sta_id: relevant station - * @reserved1: reserved - * @flags: bitmap of requested notifications %IWL_TLC_NOTIF_INIT_\* - * @interval: minimum time between notifications from TLC to the driver (msec) - * @reserved2: reserved + * enum iwl_tlc_update_flags - updated fields + * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update + * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update */ -struct iwl_tlc_notif_req_config_cmd { - u8 sta_id; - u8 reserved1; - __le16 flags; - __le16 interval; - __le16 reserved2; -} __packed; /* TLC_MNG_NOTIF_REQ_CMD_API_S_VER_1 */ +enum iwl_tlc_update_flags { + IWL_TLC_NOTIF_FLAG_RATE = BIT(0), + IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1), +}; /** * struct iwl_tlc_update_notif - TLC notification from FW * @sta_id: station id * @reserved: reserved * @flags: bitmap of notifications reported - * @values: field per flag in struct iwl_tlc_notif_req_config_cmd + * @rate: current initial rate + * @amsdu_size: Max AMSDU size, in bytes + * @amsdu_enabled: bitmap for per-TID AMSDU enablement */ struct iwl_tlc_update_notif { u8 sta_id; - u8 reserved; - __le16 flags; - __le32 values[16]; -} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_1 */ + u8 reserved[3]; + __le32 flags; + __le32 rate; + __le32 amsdu_size; + __le32 amsdu_enabled; +} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ /** * enum iwl_tlc_debug_flags - debug options diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index e7565f37ece9..7e570c4a9df0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -295,6 +295,7 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, + IWL_RX_MPDU_STATUS_SEC_UNKNOWN = IWL_RX_MPDU_STATUS_SEC_MASK, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index dfa111bb411e..6ac240b6eace 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -131,6 +131,8 @@ enum iwl_tx_queue_cfg_actions { TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), }; +#define IWL_DEFAULT_QUEUE_SIZE 256 +#define IWL_MGMT_QUEUE_SIZE 16 /** * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command * @sta_id: station id diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 72259bff9922..507d9a49fa97 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -227,4 +227,40 @@ static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt) cancel_delayed_work_sync(&fwrt->dump.wk); } +#ifdef CONFIG_IWLWIFI_DEBUGFS +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) +{ + fwrt->timestamp.delay = 0; + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); + +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->timestamp.wk); +} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) +{ + if (!fwrt->timestamp.delay) + return; + + schedule_delayed_work(&fwrt->timestamp.wk, + round_jiffies_relative(fwrt->timestamp.delay)); +} + +#else + +static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, + u32 delay) {} + +static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} + +static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} + +#endif /* CONFIG_IWLWIFI_DEBUGFS */ + #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 8f005cd69559..8ba5a60ec9ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -64,6 +64,7 @@ *****************************************************************************/ #include "api/commands.h" #include "debugfs.h" +#include "dbg.h" #define FWRT_DEBUGFS_READ_FILE_OPS(name) \ static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index d93f6a4bb22d..cbbfa8e9e66d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -69,28 +69,6 @@ int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir); -static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) -{ - fwrt->timestamp.delay = 0; - cancel_delayed_work_sync(&fwrt->timestamp.wk); -} - -static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) -{ - cancel_delayed_work_sync(&fwrt->timestamp.wk); -} - -static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) -{ - if (!fwrt->timestamp.delay) - return; - - schedule_delayed_work(&fwrt->timestamp.wk, - round_jiffies_relative(fwrt->timestamp.delay)); -} - -void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay); - #else static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) @@ -98,13 +76,4 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, return 0; } -static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} - -static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, - u32 delay) {} - #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 
9b2805e1e3b1..9d939cbaf6c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -143,6 +145,7 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_DBG_TRIGGER = 40, IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, + IWL_UCODE_TLV_IML = 52, }; struct iwl_ucode_tlv { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index b23ffe12ad84..f4912382b6af 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -241,6 +243,8 @@ enum iwl_fw_type { * @ucode_ver: ucode version from the ucode file * @fw_version: firmware version string * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan. + * @iml_len: length of the image loader image + * @iml: image loader fw image * @ucode_capa: capabilities parsed from the ucode file. * @enhance_sensitivity_table: device can do enhanced sensitivity. * @init_evtlog_ptr: event log offset for init ucode. @@ -267,6 +271,8 @@ struct iwl_fw { /* ucode images */ struct fw_img img[IWL_UCODE_TYPE_MAX]; + size_t iml_len; + u8 *iml; struct iwl_ucode_capabilities ucode_capa; bool enhance_sensitivity_table; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c deleted file mode 100644 index bd2e1fb43f5a..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ /dev/null @@ -1,162 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
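IWL_UCODE_TLV_IML and the new iml/iml_len fields above mean the image loader travels inside the firmware file as one more TLV that the driver simply copies out (the iwl-drv.c hunk further down does exactly this with kmemdup()). A stand-alone sketch of that kind of TLV walk; the record layout is simplified, the 4-byte padding of real files is skipped, and the hand-built blob assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TLV_IML 52      /* value of IWL_UCODE_TLV_IML */

struct tlv { uint32_t type; uint32_t len; /* payload follows */ };

/* walk the blob, duplicate the IML payload when found */
static uint8_t *find_iml(const uint8_t *buf, size_t size, size_t *iml_len)
{
        size_t off = 0;

        while (off + sizeof(struct tlv) <= size) {
                struct tlv hdr;

                memcpy(&hdr, buf + off, sizeof(hdr)); /* alignment-safe */
                off += sizeof(hdr);
                if (off + hdr.len > size)
                        break;          /* malformed file */
                if (hdr.type == TLV_IML) {
                        uint8_t *iml = malloc(hdr.len);

                        if (iml)
                                memcpy(iml, buf + off, hdr.len);
                        *iml_len = hdr.len;
                        return iml;
                }
                off += hdr.len;
        }
        return NULL;
}

int main(void)
{
        /* type 52, len 4, payload "IML!" (little-endian encoding) */
        uint8_t blob[16] = { 52, 0, 0, 0, 4, 0, 0, 0, 'I', 'M', 'L', '!' };
        size_t len = 0;
        uint8_t *iml = find_iml(blob, sizeof(blob), &len);

        if (iml)
                printf("found IML, %zu bytes\n", len);
        free(iml);
        return 0;
}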
- * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include "iwl-drv.h" -#include "runtime.h" -#include "fw/api/nvm-reg.h" -#include "fw/api/commands.h" -#include "iwl-nvm-parse.h" - -struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) -{ - struct iwl_nvm_get_info cmd = {}; - struct iwl_nvm_get_info_rsp *rsp; - struct iwl_trans *trans = fwrt->trans; - struct iwl_nvm_data *nvm; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &cmd, }, - .len = { sizeof(cmd) }, - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) - }; - int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&fwrt->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - return ERR_PTR(ret); - - if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), - "Invalid payload len in NVM response from FW %d", - iwl_rx_packet_payload_len(hcmd.resp_pkt))) { - ret = -EINVAL; - goto out; - } - - rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) - IWL_INFO(fwrt, "OTP is empty\n"); - - nvm = kzalloc(sizeof(*nvm) + - sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, - GFP_KERNEL); - if (!nvm) { - ret = -ENOMEM; - goto out; - } - - iwl_set_hw_address_from_csr(trans, nvm); - /* TODO: if platform NVM has MAC address - override it here */ - - if (!is_valid_ether_addr(nvm->hw_addr)) { - IWL_ERR(fwrt, "no valid mac address was found\n"); - ret = -EINVAL; - goto err_free; - } - - IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); - - /* Initialize general data */ - nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); - - /* Initialize MAC sku data */ - nvm->sku_cap_11ac_enable = - le32_to_cpu(rsp->mac_sku.enable_11ac); - nvm->sku_cap_11n_enable = - le32_to_cpu(rsp->mac_sku.enable_11n); - nvm->sku_cap_band_24GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_24g); - nvm->sku_cap_band_52GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_5g); - nvm->sku_cap_mimo_disabled = - le32_to_cpu(rsp->mac_sku.mimo_disable); - - /* Initialize PHY sku data */ - nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); - nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - - /* Initialize regulatory data */ - nvm->lar_enabled = - le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; - - iwl_init_sbands(trans->dev, trans->cfg, nvm, - rsp->regulatory.channel_profile, - nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, - nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, - nvm->lar_enabled, false); - - iwl_free_resp(&hcmd); - return nvm; - -err_free: - kfree(nvm); -out: - iwl_free_resp(&hcmd); - return ERR_PTR(ret); -} -IWL_EXPORT_SYMBOL(iwl_fw_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 1fec8e3a6b35..9b8dd7fe7112 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -163,7 +165,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, const struct fw_img *image) { - int sec_idx, idx; + int sec_idx, idx, ret; u32 offset = 0; /* @@ -190,17 +192,23 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, */ if (sec_idx >= image->num_sec - 1) { IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(fwrt); - return -EINVAL; + ret = -EINVAL; + goto err; } /* copy the CSS block to the dram */ IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", sec_idx); + if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) { + IWL_ERR(fwrt, "CSS block is larger than paging size\n"); + ret = -EINVAL; + goto err; + } + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), image->sec[sec_idx].data, - fwrt->fw_paging_db[0].fw_paging_size); + image->sec[sec_idx].len); dma_sync_single_for_device(fwrt->trans->dev, fwrt->fw_paging_db[0].fw_paging_phys, fwrt->fw_paging_db[0].fw_paging_size, @@ -213,17 +221,39 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, sec_idx++; /* - * copy the paging blocks to the dram - * loop index start from 1 since that CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. + * Copy the paging blocks to the dram. The loop index starts + * from 1 since the CSS block (index 0) was already copied to + * dram. We use num_of_paging_blk + 1 to account for that. */ - for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { + for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) { struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + int remaining = image->sec[sec_idx].len - offset; + int len = block->fw_paging_size; + + /* + * For the last block, we copy all that is remaining, + * for all other blocks, we copy fw_paging_size at a + * time. 
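The rewritten loop folds the old special-cased last-block copy into one pass: full blocks take fw_paging_size bytes, and the final block takes the remainder, which must match the expected page count exactly. A stand-alone sketch of that strategy with invented block and page sizes (PAGE_SIZE_EX stands in for FW_PAGING_SIZE; nothing here is driver code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_EX    4096
#define BLOCK_PAGES     8
#define BLOCK_SIZE_EX   (BLOCK_PAGES * PAGE_SIZE_EX)

/* copy 'len' bytes into fixed-size blocks; the last block may be
 * short and must match pages_in_last_blk exactly */
static int fill_blocks(uint8_t *blocks[], int nblocks,
                       int pages_in_last_blk,
                       const uint8_t *data, size_t len)
{
        size_t offset = 0;
        int idx;

        for (idx = 0; idx < nblocks; idx++) {
                size_t remaining = len - offset;
                size_t copy = BLOCK_SIZE_EX;

                if (idx == nblocks - 1) {
                        copy = remaining;
                        if (remaining !=
                            (size_t)pages_in_last_blk * PAGE_SIZE_EX)
                                return -1; /* image/layout mismatch */
                } else if (copy > remaining) {
                        return -1;      /* section shorter than blocks */
                }
                memcpy(blocks[idx], data + offset, copy);
                offset += BLOCK_SIZE_EX;
        }
        return 0;
}

int main(void)
{
        static uint8_t b0[BLOCK_SIZE_EX], b1[BLOCK_SIZE_EX];
        uint8_t *blocks[] = { b0, b1 };
        /* one full block plus two pages in the last block */
        static uint8_t image[BLOCK_SIZE_EX + 2 * PAGE_SIZE_EX];

        if (fill_blocks(blocks, 2, 2, image, sizeof(image)))
                return 1;
        puts("paging image laid out OK");
        return 0;
}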
*/ + if (idx == fwrt->num_of_paging_blk) { + len = remaining; + if (remaining != + fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) { + IWL_ERR(fwrt, + "Paging: last block contains more data than expected %d\n", + remaining); + ret = -EINVAL; + goto err; + } + } else if (block->fw_paging_size > remaining) { + IWL_ERR(fwrt, + "Paging: not enough data in other in block %d (%d)\n", + idx, remaining); + ret = -EINVAL; + goto err; + } memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - block->fw_paging_size); + image->sec[sec_idx].data + offset, len); dma_sync_single_for_device(fwrt->trans->dev, block->fw_paging_phys, block->fw_paging_size, @@ -231,30 +261,16 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_FW(fwrt, "Paging: copied %d paging bytes to block %d\n", - fwrt->fw_paging_db[idx].fw_paging_size, - idx); - - offset += fwrt->fw_paging_db[idx].fw_paging_size; - } - - /* copy the last paging block */ - if (fwrt->num_of_pages_in_last_blk > 0) { - struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + len, idx); - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); - dma_sync_single_for_device(fwrt->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(fwrt, - "Paging: copied %d pages in the last block %d\n", - fwrt->num_of_pages_in_last_blk, idx); + offset += block->fw_paging_size; } return 0; + +err: + iwl_free_fw_paging(fwrt); + return ret; } static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 3fb940ebd74a..d8db1dd100b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -170,6 +170,5 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, struct iwl_rx_cmd_buffer *rxb); -struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f0f5636dd3ea..c503b26793f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -69,6 +71,7 @@ #include <linux/netdevice.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> +#include "iwl-csr.h" enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, @@ -151,6 +154,8 @@ enum iwl_nvm_type { #define ANT_AC (ANT_A | ANT_C) #define ANT_BC (ANT_B | ANT_C) #define ANT_ABC (ANT_A | ANT_B | ANT_C) +#define MAX_ANT_NUM 3 + static inline u8 num_of_ant(u8 mask) { @@ -283,6 +288,52 @@ struct iwl_pwr_tx_backoff { }; /** + * struct iwl_csr_params + * + * @flag_sw_reset: reset the device + * @flag_mac_clock_ready: + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. + * NOTE: This does not indicate that device has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * This note is relevant only for pre 5xxx devices. + * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + * @flag_init_done: Host sets this to put device into fully operational + * D0 power mode. Host resets this after SW_RESET to put device into + * low power mode. + * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup, + * to allow host access to device-internal resources. Host must wait for + * mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device + * registers. + * @flag_val_mac_access_en: mac access is enabled + * @flag_master_dis: disable master + * @flag_stop_master: stop master + * @addr_sw_reset: address for resetting the device + * @mac_addr0_otp: first part of MAC address from OTP + * @mac_addr1_otp: second part of MAC address from OTP + * @mac_addr0_strap: first part of MAC address from strap + * @mac_addr1_strap: second part of MAC address from strap + */ +struct iwl_csr_params { + u8 flag_sw_reset; + u8 flag_mac_clock_ready; + u8 flag_init_done; + u8 flag_mac_access_req; + u8 flag_val_mac_access_en; + u8 flag_master_dis; + u8 flag_stop_master; + u8 addr_sw_reset; + u32 mac_addr0_otp; + u32 mac_addr1_otp; + u32 mac_addr0_strap; + u32 mac_addr1_strap; +}; + +/** * struct iwl_cfg * @name: Official name of the device * @fw_name_pre: Firmware filename prefix. The api version and extension @@ -294,8 +345,8 @@ struct iwl_pwr_tx_backoff { * next step. Supported only in integrated solutions. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. 
- * @max_inst_size: The maximal length of the fw inst section - * @max_data_size: The maximal length of the fw data section + * @max_inst_size: The maximal length of the fw inst section (only DVM) + * @max_data_size: The maximal length of the fw data section (only DVM) * @valid_tx_ant: valid transmit antenna * @valid_rx_ant: valid receive antenna * @non_shared_ant: the antenna that is for WiFi only @@ -314,6 +365,7 @@ struct iwl_pwr_tx_backoff { * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs + * @csr: csr flags and addresses that are different across devices * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response * @max_ht_ampdu_factor: the exponent of the max length of A-MPDU that the @@ -333,8 +385,6 @@ struct iwl_pwr_tx_backoff { * @gen2: 22000 and on transport operation * @cdb: CDB support * @nvm_type: see &enum iwl_nvm_type - * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as - * the regular queues * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -354,6 +404,7 @@ struct iwl_cfg { const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; const char *default_nvm_file_C_step; const struct iwl_tt_params *thermal_params; + const struct iwl_csr_params *csr; enum iwl_device_family device_family; enum iwl_led_mode led_mode; enum iwl_nvm_type nvm_type; @@ -369,7 +420,7 @@ struct iwl_cfg { u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; - u16 rx_with_siso_diversity:1, + u32 rx_with_siso_diversity:1, bt_shared_single_ant:1, internal_wimax_coex:1, host_interrupt_operation_mode:1, @@ -386,7 +437,6 @@ struct iwl_cfg { gen2:1, cdb:1, dbgc_supported:1; - u16 tx_cmd_queue_size; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -401,6 +451,36 @@ struct iwl_cfg { u32 extra_phy_cfg_flags; }; +static const struct iwl_csr_params iwl_csr_v1 = { + .flag_mac_clock_ready = 0, + .flag_val_mac_access_en = 0, + .flag_init_done = 2, + .flag_mac_access_req = 3, + .flag_sw_reset = 7, + .flag_master_dis = 8, + .flag_stop_master = 9, + .addr_sw_reset = (CSR_BASE + 0x020), + .mac_addr0_otp = 0x380, + .mac_addr1_otp = 0x384, + .mac_addr0_strap = 0x388, + .mac_addr1_strap = 0x38C +}; + +static const struct iwl_csr_params iwl_csr_v2 = { + .flag_init_done = 6, + .flag_mac_clock_ready = 20, + .flag_val_mac_access_en = 20, + .flag_mac_access_req = 21, + .flag_master_dis = 28, + .flag_stop_master = 29, + .flag_sw_reset = 31, + .addr_sw_reset = (CSR_BASE + 0x024), + .mac_addr0_otp = 0x30, + .mac_addr1_otp = 0x34, + .mac_addr0_strap = 0x38, + .mac_addr1_strap = 0x3C +}; + /* * This list declares the config structures for all devices. */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 4f0d070eda54..ba971d3946e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -8,6 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,6 +35,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -257,7 +259,6 @@ /* RESET */ #define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) #define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) -#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) #define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) #define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) #define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) @@ -280,35 +281,10 @@ * 4: GOING_TO_SLEEP * Indicates MAC is entering a power-saving sleep power-down. * Not a good time to access device-internal resources. - * 3: MAC_ACCESS_REQ - * Host sets this to request and maintain MAC wakeup, to allow host - * access to device-internal resources. Host must wait for - * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR - * device registers. - * 2: INIT_DONE - * Host sets this to put device into fully operational D0 power mode. - * Host resets this after SW_RESET to put device into low power mode. - * 0: MAC_CLOCK_READY - * Indicates MAC (ucode processor, etc.) is powered up and can run. - * Internal resources are accessible. - * NOTE: This does not indicate that the processor is actually running. - * NOTE: This does not indicate that device has completed - * init or post-power-down restore of internal SRAM memory. - * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that - * SRAM is restored and uCode is in normal operation mode. - * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and - * do not need to save/restore it. 
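These fixed CSR_GP_CNTRL bit masks can go away because the bit positions now live in the per-device struct iwl_csr_params and are rebuilt with BIT() at each call site, as the iwl-eeprom-read.c hunk below shows. A compressed stand-alone illustration of the pattern; the struct is trimmed, the register is faked, and only the bit positions are taken from iwl_csr_v1/iwl_csr_v2 above:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct csr_params {
        uint8_t flag_mac_clock_ready;
        uint8_t flag_init_done;
        uint8_t flag_mac_access_req;
};

/* positions as in iwl_csr_v1 (older devices) and iwl_csr_v2 */
static const struct csr_params csr_v1 = {
        .flag_mac_clock_ready = 0,
        .flag_init_done = 2,
        .flag_mac_access_req = 3,
};
static const struct csr_params csr_v2 = {
        .flag_mac_clock_ready = 20,
        .flag_init_done = 6,
        .flag_mac_access_req = 21,
};

static uint32_t gp_cntrl;       /* stand-in for the CSR_GP_CNTRL register */

static void set_init_done(const struct csr_params *csr)
{
        /* same read-modify-write the driver does via iwl_write32() */
        gp_cntrl |= BIT(csr->flag_init_done);
}

int main(void)
{
        set_init_done(&csr_v1);
        printf("v1 GP_CNTRL: 0x%08x\n", gp_cntrl);
        gp_cntrl = 0;
        set_init_done(&csr_v2);
        printf("v2 GP_CNTRL: 0x%08x\n", gp_cntrl);
        return 0;
}

One config pointer per device family keeps a single code path working across layouts instead of sprinkling device checks around every register access.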
- * NOTE: After device reset, this bit remains "0" until host sets - * INIT_DONE */ -#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) -#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) -#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400) -#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) - #define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) #define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000) #define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index aa2d5c14e202..c59ce4f8a5ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv) for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) kfree(drv->fw.dbg_trigger_tlv[i]); kfree(drv->fw.dbg_mem_tlv); + kfree(drv->fw.iml); for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) iwl_free_fw_img(drv, drv->fw.img + i); @@ -1126,6 +1127,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, pieces->n_dbg_mem_tlv++; break; } + case IWL_UCODE_TLV_IML: { + drv->fw.iml_len = tlv_len; + drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL); + if (!drv->fw.iml) + return -ENOMEM; + break; + } default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -1842,3 +1850,9 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444); MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)"); + +module_param_named(remove_when_gone, + iwlwifi_mod_params.remove_when_gone, bool, + 0444); +MODULE_PARM_DESC(remove_when_gone, + "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 3199d345b427..777f5df8a0c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -899,8 +899,8 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, EEPROM_SKU_CAP); data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE; data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE; - data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; + data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ; data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index b33888991b94..8be50ed12300 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -81,10 +81,11 @@ struct iwl_nvm_data { __le16 kelvin_voltage; __le16 xtal_calib[2]; - bool sku_cap_band_24GHz_enable; - bool sku_cap_band_52GHz_enable; + bool sku_cap_band_24ghz_enable; + bool sku_cap_band_52ghz_enable; bool sku_cap_11n_enable; bool sku_cap_11ac_enable; + bool sku_cap_11ax_enable; bool sku_cap_amt_enable; bool sku_cap_ipan_enable; bool sku_cap_mimo_disabled; diff 
--git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index f2cea1c7befc..ac965c34a2f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -199,12 +201,12 @@ static int iwl_init_otp_access(struct iwl_trans *trans) /* Enable 40MHz radio clock */ iwl_write32(trans, CSR_GP_CNTRL, iwl_read32(trans, CSR_GP_CNTRL) | - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* wait for clock to be ready */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_ERR(trans, "Time out access OTP\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index a41c46e63eb1..a7dd8a8cddf9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -122,6 +122,7 @@ enum iwl_uapsd_disable { * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. + * @remove_when_gone: remove an inaccessible device from the PCIe bus. */ struct iwl_mod_params { int swcrypto; @@ -143,6 +144,7 @@ struct iwl_mod_params { bool lar_disable; bool fw_monitor; bool disable_11ac; + bool remove_when_gone; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index ca0174680af9..b815ba38dbdb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -8,6 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,7 @@ #include <linux/export.h> #include <linux/etherdevice.h> #include <linux/pci.h> +#include <linux/firmware.h> #include "iwl-drv.h" #include "iwl-modparams.h" @@ -77,6 +80,9 @@ #include "iwl-csr.h" #include "fw/acpi.h" #include "fw/api/nvm-reg.h" +#include "fw/api/commands.h" +#include "fw/api/cmdhdr.h" +#include "fw/img.h" /* NVM offsets (in words) definitions */ enum nvm_offsets { @@ -292,7 +298,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 * const nvm_ch_flags, - bool lar_supported, bool no_wide_in_5ghz) + u32 sbands_flags) { int ch_idx; int n_channels = 0; @@ -316,11 +322,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); - if (is_5ghz && !data->sku_cap_band_52GHz_enable) + if (is_5ghz && !data->sku_cap_band_52ghz_enable) continue; /* workaround to disable wide channels in 5GHz */ - if (no_wide_in_5ghz && is_5ghz) { + if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) && + is_5ghz) { ch_flags &= ~(NVM_CHANNEL_40MHZ | NVM_CHANNEL_80MHZ | NVM_CHANNEL_160MHZ); @@ -329,7 +336,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; - if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { + if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) && + !(ch_flags & NVM_CHANNEL_VALID)) { /* * Channels might become valid later if lar is * supported, hence we still want to add them to @@ -359,7 +367,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->max_power = IWL_DEFAULT_MAX_TX_POWER; /* don't put limitations in case we're using LAR */ - if (!lar_supported) + if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR)) channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], ch_idx, is_5ghz, ch_flags, cfg); @@ -455,17 +463,17 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } -void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported, - bool no_wide_in_5ghz) +static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, + const __le16 *nvm_ch_flags, u8 tx_chains, + u8 rx_chains, u32 sbands_flags) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, - lar_supported, no_wide_in_5ghz); + sbands_flags); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -491,7 +499,6 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); } -IWL_EXPORT_SYMBOL(iwl_init_sbands); static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, const __le16 *phy_sku) @@ -569,11 +576,15 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) dest[5] = hw_addr[0]; } -void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data) +static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) { - __le32 mac_addr0 = 
cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); - __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); + __le32 mac_addr0 = + cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr0_strap)); + __le32 mac_addr1 = + cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr1_strap)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); /* @@ -583,12 +594,13 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, if (is_valid_ether_addr(data->hw_addr)) return; - mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); - mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); + mac_addr0 = cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr0_otp)); + mac_addr1 = cpu_to_le32(iwl_read32(trans, + trans->cfg->csr->mac_addr1_otp)); iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); } -IWL_EXPORT_SYMBOL(iwl_set_hw_address_from_csr); static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, @@ -713,8 +725,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct device *dev = trans->dev; struct iwl_nvm_data *data; bool lar_enabled; - bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw); u32 sku, radio_cfg; + u32 sbands_flags = 0; u16 lar_config; const __le16 *ch_section; @@ -741,8 +753,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, rx_chains &= data->valid_rx_ant; sku = iwl_get_sku(cfg, nvm_sw, phy_sku); - data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; - data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; + data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; + data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) data->sku_cap_11n_enable = false; @@ -787,8 +799,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } + if (lar_fw_supported && lar_enabled) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + + if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw)) + sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ; + iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, - lar_fw_supported && lar_enabled, no_wide_in_5ghz); + sbands_flags); data->calib_version = 255; return data; @@ -1007,8 +1025,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, continue; copy_rd->reg_rules[i].wmm_rule = d_wmm + - (regd->reg_rules[i].wmm_rule - s_wmm) / - sizeof(struct ieee80211_wmm_rule); + (regd->reg_rules[i].wmm_rule - s_wmm); } out: @@ -1017,3 +1034,294 @@ out: return copy_rd; } IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info); + +#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 +#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc +#define MAX_NVM_FILE_LEN 16384 + +void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, + unsigned int len) +{ +#define IWL_4165_DEVICE_ID 0x5501 +#define NVM_SKU_CAP_MIMO_DISABLE BIT(5) + + if (section == NVM_SECTION_TYPE_PHY_SKU && + hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && + (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) + /* OTP 0x52 bug work around: it's a 1x1 device */ + data[3] = ANT_B | (ANT_B << 4); +} +IWL_EXPORT_SYMBOL(iwl_nvm_fixups); + +/* + * Reads external NVM from a file into mvm->nvm_sections + * + * HOW TO CREATE THE NVM FILE FORMAT: + * ------------------------------ + * 1. 
create hex file, format: + * 3800 -> header + * 0000 -> header + * 5a40 -> data + * + * rev - 6 bit (word1) + * len - 10 bit (word1) + * id - 4 bit (word2) + * rsv - 12 bit (word2) + * + * 2. flip 8bits with 8 bits per line to get the right NVM file format + * + * 3. create binary file from the hex file + * + * 4. save as "iNVM_xxx.bin" under /lib/firmware + */ +int iwl_read_external_nvm(struct iwl_trans *trans, + const char *nvm_file_name, + struct iwl_nvm_section *nvm_sections) +{ + int ret, section_size; + u16 section_id; + const struct firmware *fw_entry; + const struct { + __le16 word1; + __le16 word2; + u8 data[]; + } *file_sec; + const u8 *eof; + u8 *temp; + int max_section_size; + const __le32 *dword_buff; + +#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) +#define NVM_WORD2_ID(x) (x >> 12) +#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) +#define EXT_NVM_WORD1_ID(x) ((x) >> 4) +#define NVM_HEADER_0 (0x2A504C54) +#define NVM_HEADER_1 (0x4E564D2A) +#define NVM_HEADER_SIZE (4 * sizeof(u32)) + + IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n"); + + /* Maximal size depends on NVM version */ + if (trans->cfg->nvm_type != IWL_NVM_EXT) + max_section_size = IWL_MAX_NVM_SECTION_SIZE; + else + max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; + + /* + * Obtain NVM image via request_firmware. Since we already used + * request_firmware_nowait() for the firmware binary load and only + * get here after that we assume the NVM request can be satisfied + * synchronously. + */ + ret = request_firmware(&fw_entry, nvm_file_name, trans->dev); + if (ret) { + IWL_ERR(trans, "ERROR: %s isn't available %d\n", + nvm_file_name, ret); + return ret; + } + + IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n", + nvm_file_name, fw_entry->size); + + if (fw_entry->size > MAX_NVM_FILE_LEN) { + IWL_ERR(trans, "NVM file too large\n"); + ret = -EINVAL; + goto out; + } + + eof = fw_entry->data + fw_entry->size; + dword_buff = (__le32 *)fw_entry->data; + + /* some NVM file will contain a header. + * The header is identified by 2 dwords header as follow: + * dword[0] = 0x2A504C54 + * dword[1] = 0x4E564D2A + * + * This header must be skipped when providing the NVM data to the FW. 
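Each section that follows such a header starts with two little-endian 16-bit words whose packing differs between the old and the extended NVM layout. A small stand-alone decoder mirroring the NVM_WORD*/EXT_NVM_WORD* macros defined below, with sizes computed exactly as the parser does; the sample header bytes are made up for the demo:

#include <stdint.h>
#include <stdio.h>

/* old layout: len in word1 (10 bits, 8-byte units), id in word2 */
#define NVM_WORD1_LEN(x)      (8 * ((x) & 0x03FF))
#define NVM_WORD2_ID(x)       ((x) >> 12)
/* extended layout: byte-swapped len in word2 (2-byte units), id in word1 */
#define EXT_NVM_WORD2_LEN(x)  (2 * ((((x) & 0xFF) << 8) | ((x) >> 8)))
#define EXT_NVM_WORD1_ID(x)   ((x) >> 4)

static uint16_t le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
        /* hypothetical section header: word1 = 0x0010, word2 = 0x1000 */
        const uint8_t hdr[4] = { 0x10, 0x00, 0x00, 0x10 };
        uint16_t w1 = le16(hdr), w2 = le16(hdr + 2);

        printf("old layout: id %u, %d bytes\n",
               NVM_WORD2_ID(w2), 2 * NVM_WORD1_LEN(w1));
        printf("ext layout: id %u, %d bytes\n",
               EXT_NVM_WORD1_ID(w1), 2 * EXT_NVM_WORD2_LEN(w2));
        return 0;
}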
+ */ + if (fw_entry->size > NVM_HEADER_SIZE && + dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && + dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { + file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); + IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); + IWL_INFO(trans, "NVM Manufacturing date %08X\n", + le32_to_cpu(dword_buff[3])); + + /* nvm file validation, dword_buff[2] holds the file version */ + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && + CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP && + le32_to_cpu(dword_buff[2]) < 0xE4A) { + ret = -EFAULT; + goto out; + } + } else { + file_sec = (void *)fw_entry->data; + } + + while (true) { + if (file_sec->data > eof) { + IWL_ERR(trans, + "ERROR - NVM file too short for section header\n"); + ret = -EINVAL; + break; + } + + /* check for EOF marker */ + if (!file_sec->word1 && !file_sec->word2) { + ret = 0; + break; + } + + if (trans->cfg->nvm_type != IWL_NVM_EXT) { + section_size = + 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); + section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); + } else { + section_size = 2 * EXT_NVM_WORD2_LEN( + le16_to_cpu(file_sec->word2)); + section_id = EXT_NVM_WORD1_ID( + le16_to_cpu(file_sec->word1)); + } + + if (section_size > max_section_size) { + IWL_ERR(trans, "ERROR - section too large (%d)\n", + section_size); + ret = -EINVAL; + break; + } + + if (!section_size) { + IWL_ERR(trans, "ERROR - section empty\n"); + ret = -EINVAL; + break; + } + + if (file_sec->data + section_size > eof) { + IWL_ERR(trans, + "ERROR - NVM file too short for section (%d bytes)\n", + section_size); + ret = -EINVAL; + break; + } + + if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, + "Invalid NVM section ID %d\n", section_id)) { + ret = -EINVAL; + break; + } + + temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } + + iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size); + + kfree(nvm_sections[section_id].data); + nvm_sections[section_id].data = temp; + nvm_sections[section_id].length = section_size; + + /* advance to the next section */ + file_sec = (void *)(file_sec->data + section_size); + } +out: + release_firmware(fw_entry); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_read_external_nvm); + +struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, + const struct iwl_fw *fw) +{ + struct iwl_nvm_get_info cmd = {}; + struct iwl_nvm_get_info_rsp *rsp; + struct iwl_nvm_data *nvm; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &cmd, }, + .len = { sizeof(cmd) }, + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) + }; + int ret; + bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && + fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + u32 mac_flags; + u32 sbands_flags = 0; + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + return ERR_PTR(ret); + + if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), + "Invalid payload len in NVM response from FW %d", + iwl_rx_packet_payload_len(hcmd.resp_pkt))) { + ret = -EINVAL; + goto out; + } + + rsp = (void *)hcmd.resp_pkt->data; + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(trans, "OTP is empty\n"); + + nvm = kzalloc(sizeof(*nvm) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!nvm) { + ret = -ENOMEM; + goto out; + } + + iwl_set_hw_address_from_csr(trans, nvm); + /* TODO: if platform NVM has MAC address - override it here */ + + if 
(!is_valid_ether_addr(nvm->hw_addr)) { + IWL_ERR(trans, "no valid mac address was found\n"); + ret = -EINVAL; + goto err_free; + } + + IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); + + /* Initialize general data */ + nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + + /* Initialize MAC sku data */ + mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags); + nvm->sku_cap_11ac_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); + nvm->sku_cap_11n_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); + nvm->sku_cap_band_24ghz_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); + nvm->sku_cap_band_52ghz_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED); + nvm->sku_cap_mimo_disabled = + !!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED); + + /* Initialize PHY sku data */ + nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); + nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); + + if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + nvm->lar_enabled = true; + sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; + } + + iwl_init_sbands(trans->dev, trans->cfg, nvm, + rsp->regulatory.channel_profile, + nvm->valid_tx_ant & fw->valid_tx_ant, + nvm->valid_rx_ant & fw->valid_rx_ant, + sbands_flags); + + iwl_free_resp(&hcmd); + return nvm; + +err_free: + kfree(nvm); +out: + iwl_free_resp(&hcmd); + return ERR_PTR(ret); +} +IWL_EXPORT_SYMBOL(iwl_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 3071a23b7606..234d1009a9de 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
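The decode just above is the consumer of the v2 API change from nvm-reg.h: six separate __le32 booleans collapse into the single mac_sku_flags word. A stand-alone sketch of the same decode; the flag values are copied from enum iwl_nvm_mac_sku_flags, while the struct and helper names are invented for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* values as in enum iwl_nvm_mac_sku_flags */
#define SKU_BAND_2_4_ENABLED    BIT(0)
#define SKU_BAND_5_2_ENABLED    BIT(1)
#define SKU_802_11N_ENABLED     BIT(2)
#define SKU_802_11AC_ENABLED    BIT(3)
#define SKU_MIMO_DISABLED       BIT(5)

struct sku_caps {
        bool band_24ghz, band_52ghz, ht, vht, mimo_disabled;
};

static struct sku_caps decode_sku(uint32_t flags)
{
        return (struct sku_caps){
                .band_24ghz    = flags & SKU_BAND_2_4_ENABLED,
                .band_52ghz    = flags & SKU_BAND_5_2_ENABLED,
                .ht            = flags & SKU_802_11N_ENABLED,
                .vht           = flags & SKU_802_11AC_ENABLED,
                .mimo_disabled = flags & SKU_MIMO_DISABLED,
        };
}

int main(void)
{
        /* hypothetical response word: both bands, HT+VHT, MIMO allowed */
        struct sku_caps c = decode_sku(SKU_BAND_2_4_ENABLED |
                                       SKU_BAND_5_2_ENABLED |
                                       SKU_802_11N_ENABLED |
                                       SKU_802_11AC_ENABLED);

        printf("2.4GHz:%d 5GHz:%d 11n:%d 11ac:%d mimo off:%d\n",
               c.band_24ghz, c.band_52ghz, c.ht, c.vht, c.mimo_disabled);
        return 0;
}

Packing the SKU into one flags word also makes room for new capabilities (11ax, WAPI, the regulatory checker) without growing the response struct again.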
* * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,17 @@ #include "iwl-eeprom-parse.h" /** + * enum iwl_nvm_sbands_flags - modification flags for the channel profiles + * + * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled + * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz + */ +enum iwl_nvm_sbands_flags { + IWL_NVM_SBANDS_FLAGS_LAR = BIT(0), + IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ = BIT(1), +}; + +/** * iwl_parse_nvm_data - parse NVM data and return values * * This function parses all NVM values we need and then @@ -83,20 +96,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); /** - * iwl_set_hw_address_from_csr - sets HW address for 9000 devices and on - */ -void iwl_set_hw_address_from_csr(struct iwl_trans *trans, - struct iwl_nvm_data *data); - -/** - * iwl_init_sbands - parse and set all channel profiles - */ -void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported, - bool no_wide_in_5ghz); - -/** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a @@ -111,4 +110,33 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, u16 geo_info); +/** + * struct iwl_nvm_section - describes an NVM section in memory. + * + * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, + * and saved for later use by the driver. Not all NVM sections are saved + * this way, only the needed ones. + */ +struct iwl_nvm_section { + u16 length; + const u8 *data; +}; + +/** + * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections + */ +int iwl_read_external_nvm(struct iwl_trans *trans, + const char *nvm_file_name, + struct iwl_nvm_section *nvm_sections); +void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data, + unsigned int len); + +/** + * iwl_get_nvm - retrieve NVM data from firmware + * + * Allocates a new iwl_nvm_data structure, fills it with + * NVM data, and returns it to caller. + */ +struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, + const struct iwl_fw *fw); #endif /* __iwl_nvm_parse_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index c25ed1a0bbb0..1b9c627ee34d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -554,7 +554,7 @@ struct iwl_trans_ops { /* 22000 functions */ int (*txq_alloc)(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); @@ -691,6 +691,8 @@ enum iwl_plat_pm_mode { * @wide_cmd_header: true when ucode supports wide command header format * @num_rx_queues: number of RX queues allocated by the transport; * the transport must set this before calling iwl_drv_start() + * @iml_len: the length of the image loader + * @iml: a pointer to the image loader itself * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. * The user should use iwl_trans_{alloc,free}_tx_cmd. 
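The extra size argument to txq_alloc() lets callers pick the TFD ring depth per queue role instead of using one global size, which is what the IWL_DEFAULT_QUEUE_SIZE and IWL_MGMT_QUEUE_SIZE defines added to txq.h enable. A trivial sketch of that call-site policy; the helper is hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define IWL_DEFAULT_QUEUE_SIZE  256
#define IWL_MGMT_QUEUE_SIZE     16

/* choose a ring size by queue role, as an opmode might do before
 * calling iwl_trans_txq_alloc() */
static int queue_size_for(bool is_mgmt)
{
        return is_mgmt ? IWL_MGMT_QUEUE_SIZE : IWL_DEFAULT_QUEUE_SIZE;
}

int main(void)
{
        printf("mgmt queue: %d entries\n", queue_size_for(true));
        printf("data queue: %d entries\n", queue_size_for(false));
        return 0;
}

Management traffic is sparse, so a 16-entry ring saves DMA memory that a 256-entry data ring would waste there.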
* @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before @@ -735,6 +737,9 @@ struct iwl_trans { u8 num_rx_queues; + size_t iml_len; + u8 *iml; + /* The following fields are internal only */ struct kmem_cache *dev_cmd_pool; char dev_cmd_pool_name[50]; @@ -952,8 +957,8 @@ iwl_trans_txq_free(struct iwl_trans *trans, int queue) static inline int iwl_trans_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, - unsigned int queue_wdg_timeout) + int cmd_id, int size, + unsigned int wdg_timeout) { might_sleep(); @@ -965,7 +970,7 @@ iwl_trans_txq_alloc(struct iwl_trans *trans, return -EIO; } - return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout); + return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout); } static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 890dbfff3a06..016e03a5034f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -279,6 +279,8 @@ struct iwl_bt_iterator_data { struct ieee80211_chanctx_conf *primary; struct ieee80211_chanctx_conf *secondary; bool primary_ll; + u8 primary_load; + u8 secondary_load; }; static inline @@ -295,6 +297,30 @@ void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm, enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0; } +#define MVM_COEX_TCM_PERIOD (HZ * 10) + +static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm, + struct iwl_bt_iterator_data *data) +{ + unsigned long now = jiffies; + + if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD)) + return; + + mvm->bt_coex_last_tcm_ts = now; + + /* We assume here that we don't have more than 2 vifs on 2.4GHz */ + + /* if the primary is low latency, it will stay primary */ + if (data->primary_ll) + return; + + if (data->primary_load >= data->secondary_load) + return; + + swap(data->primary, data->secondary); +} + /* must be called under rcu_read_lock */ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) @@ -385,6 +411,11 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* there is low latency vif - we will be secondary */ data->secondary = chanctx_conf; } + + if (data->primary == chanctx_conf) + data->primary_load = mvm->tcm.result.load[mvmvif->id]; + else if (data->secondary == chanctx_conf) + data->secondary_load = mvm->tcm.result.load[mvmvif->id]; return; } @@ -398,6 +429,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, /* if secondary is not NULL, it might be a GO */ data->secondary = chanctx_conf; + if (data->primary == chanctx_conf) + data->primary_load = mvm->tcm.result.load[mvmvif->id]; + else if (data->secondary == chanctx_conf) + data->secondary_load = mvm->tcm.result.load[mvmvif->id]; /* * don't reduce the Tx power if one of these is true: * we are in LOOSE @@ -449,6 +484,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_bt_notif_iterator, &data); + iwl_mvm_bt_coex_tcm_based_ci(mvm, &data); + if (data.primary) { struct ieee80211_chanctx_conf *chan = data.primary; if (WARN_ON(!chan->def.chan)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 96b52a275ee3..d61ff66ce07b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -69,6 +69,8 @@ #include 
<linux/ieee80211.h> +#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 + #define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) @@ -112,6 +114,11 @@ #define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 +#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ +#define IWL_MVM_TCM_LOAD_HIGH_THRESH 50 /* percentage */ +#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ +#define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ +#define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 2efe9b099556..3fcf489f3120 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -693,6 +690,14 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, IWL_WOWLAN_WAKEUP_LINK_CHANGE); } + if (wowlan->any) { + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE | + IWL_WOWLAN_WAKEUP_RX_FRAME | + IWL_WOWLAN_WAKEUP_BCN_FILTERING); + } + return 0; } @@ -1097,6 +1102,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) /* make sure the d0i3 exit work is not pending */ flush_work(&mvm->d0i3_exit_work); + iwl_mvm_pause_tcm(mvm, true); iwl_fw_runtime_suspend(&mvm->fwrt); @@ -2014,6 +2020,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_mvm_resume_tcm(mvm); + iwl_fw_runtime_resume(&mvm->fwrt); return ret; @@ -2042,6 +2050,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + iwl_mvm_pause_tcm(mvm, true); + iwl_fw_runtime_suspend(&mvm->fwrt); /* start pseudo D3 */ @@ -2104,6 +2114,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) __iwl_mvm_resume(mvm, true); rtnl_unlock(); + iwl_mvm_resume_tcm(mvm); + iwl_fw_runtime_resume(&mvm->fwrt); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index f7fcf700196b..798605c4f122 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -269,6 +269,8 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file, mvmvif->id, mvmvif->color); pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n", vif->bss_conf.bssid); + pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n", + mvm->tcm.result.load[mvmvif->id]); pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n"); for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++) pos += scnprintf(buf+pos, bufsz-pos, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 0e6401cd7ccc..1c4178f20441 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1728,6 +1728,27 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t +iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1]; + unsigned int pos = 0; + size_t bufsz = sizeof(buf); + int i; + + mutex_lock(&mvm->mutex); + + for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) + pos += scnprintf(buf + pos, bufsz - pos, "%pM\n", + mvm->uapsd_noagg_bssids[i].addr); + + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ @@ -1762,6 +1783,8 @@ MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); +MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); + #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256); @@ -1972,6 +1995,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) goto 
err; + MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR); + #ifdef CONFIG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { bcast_dir = debugfs_create_dir("bcast_filtering", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 3c59109bea20..866c91c923be 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -79,6 +81,8 @@ #include "mvm.h" #include "fw/dbg.h" #include "iwl-phy-db.h" +#include "iwl-modparams.h" +#include "iwl-nvm-parse.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ #define MVM_UCODE_CALIB_TIMEOUT (2*HZ) @@ -381,7 +385,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Load NVM to NIC if needed */ if (mvm->nvm_file_name) { - iwl_mvm_read_external_nvm(mvm); + iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, + mvm->nvm_sections); iwl_mvm_load_nvm_to_nic(mvm); } @@ -410,7 +415,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && read_nvm) { - mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt); + mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw); if (IS_ERR(mvm->nvm_data)) { ret = PTR_ERR(mvm->nvm_data); mvm->nvm_data = NULL; @@ -1093,6 +1098,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { mvm->scan_type = IWL_SCAN_TYPE_NOT_SET; + mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET; ret = iwl_mvm_config_scan(mvm); if (ret) goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 9b2e1cb58e38..a6e072234398 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -953,6 +953,16 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: + if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == + iwl_mvm_sta_from_mac80211(sta)->sta_id) { + struct iwl_mvm_vif *mvmvif; + u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; + + mdata->opened_rx_ba_sessions = true; + mvmvif = iwl_mvm_vif_from_mac80211(vif); + cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); + } if (!iwl_enable_rx_ampdu(mvm->cfg)) { ret = -EINVAL; break; @@ -1436,6 +1446,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, mvm->p2p_device_vif = vif; } + iwl_mvm_tcm_add_vif(mvm, vif); + if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = true; @@ -1487,6 +1499,10 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_prepare_mac_removal(mvm, vif); + if (!(vif->type == NL80211_IFTYPE_AP 
|| + vif->type == NL80211_IFTYPE_ADHOC)) + iwl_mvm_tcm_rm_vif(mvm, vif); + mutex_lock(&mvm->mutex); if (mvm->bf_allowed_vif == mvmvif) { @@ -2536,6 +2552,16 @@ static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const u8 *bssid) { + int i; + + if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_tcm_mac *mdata; + + mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; + ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); + mdata->opened_rx_ba_sessions = false; + } + if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; @@ -2550,6 +2576,13 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; } + for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { + if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { + vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + return; + } + } + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; } @@ -2652,7 +2685,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); /* track whether or not the station is associated */ - mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC; + mvm_sta->sta_state = new_state; if (old_state == IEEE80211_STA_NOTEXIST && new_state == IEEE80211_STA_NONE) { @@ -2704,8 +2737,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - true); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { @@ -2721,8 +2753,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, /* enable beacon filtering */ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); - iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, - false); + iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band); ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && @@ -2811,7 +2842,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, } static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + u16 req_duration) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; @@ -2824,6 +2856,9 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) return; + if (req_duration > duration) + duration = req_duration; + mutex_lock(&mvm->mutex); /* Try really hard to protect the session and hear a beacon */ iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d2cf751db68d..6a4ba160c59e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -90,7 +92,9 @@ #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" -#include "fw/debugfs.h" +#include "iwl-nvm-parse.h" + +#include <linux/average.h> #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -444,6 +448,8 @@ struct iwl_mvm_vif { /* FW identified misbehaving AP */ u8 uapsd_misbehaving_bssid[ETH_ALEN]; + struct delayed_work uapsd_nonagg_detected_wk; + /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; @@ -503,18 +509,6 @@ enum iwl_mvm_sched_scan_pass_all_states { }; /** - * struct iwl_nvm_section - describes an NVM section in memory. - * - * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD, - * and saved for later use by the driver. Not all NVM sections are saved - * this way, only the needed ones. - */ -struct iwl_nvm_section { - u16 length; - const u8 *data; -}; - -/** * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? @@ -595,6 +589,53 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; +enum iwl_mvm_traffic_load { + IWL_MVM_TRAFFIC_LOW, + IWL_MVM_TRAFFIC_MEDIUM, + IWL_MVM_TRAFFIC_HIGH, +}; + +DECLARE_EWMA(rate, 16, 16) + +struct iwl_mvm_tcm_mac { + struct { + u32 pkts[IEEE80211_NUM_ACS]; + u32 airtime; + } tx; + struct { + u32 pkts[IEEE80211_NUM_ACS]; + u32 airtime; + u32 last_ampdu_ref; + } rx; + struct { + /* track AP's transfer in client mode */ + u64 rx_bytes; + struct ewma_rate rate; + bool detected; + } uapsd_nonagg_detect; + bool opened_rx_ba_sessions; +}; + +struct iwl_mvm_tcm { + struct delayed_work work; + spinlock_t lock; /* used when time elapsed */ + unsigned long ts; /* timestamp when period ends */ + unsigned long ll_ts; + unsigned long uapsd_nonagg_ts; + bool paused; + struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER]; + struct { + u32 elapsed; /* milliseconds for this TCM period */ + u32 airtime[NUM_MAC_INDEX_DRIVER]; + enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER]; + enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS]; + enum iwl_mvm_traffic_load global_load; + bool low_latency[NUM_MAC_INDEX_DRIVER]; + bool change[NUM_MAC_INDEX_DRIVER]; + bool global_change; + } result; +}; + /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn @@ -829,7 +870,10 @@ struct iwl_mvm { unsigned int scan_status; void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; + /* For CDB this is low band scan type, for non-CDB - type. 
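+ * On CDB (dual-band) devices the two bands are scanned with
+ * independent parameters, so the low band keeps this field and
+ * hb_scan_type below carries the high-band value (selected via
+ * the new iwl_mvm_get_scan_type_band() in scan.c).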
*/ enum iwl_mvm_scan_type scan_type; + enum iwl_mvm_scan_type hb_scan_type; + enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; struct delayed_work scan_timeout_dwork; @@ -978,6 +1022,13 @@ struct iwl_mvm { */ bool temperature_test; /* Debug test temperature is enabled */ + unsigned long bt_coex_last_tcm_ts; + struct iwl_mvm_tcm tcm; + + u8 uapsd_noagg_bssid_write_idx; + struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM] + __aligned(2); + struct iwl_time_quota_cmd last_quota_cmd; #ifdef CONFIG_NL80211_TESTMODE @@ -1293,6 +1344,16 @@ static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CDB_SUPPORT); } +static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm) +{ + /* + * TODO: should this be the same as iwl_mvm_is_cdb_supported()? + * but then there's a little bit of code in scan that won't make + * any sense... + */ + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000; +} + static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) { return fw_has_api(&mvm->fw->ucode_capa, @@ -1438,7 +1499,6 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); -int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm); static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) { @@ -1771,6 +1831,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum iwl_mvm_low_latency_cause cause); /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); +bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); + /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) { @@ -1906,6 +1968,17 @@ bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm); void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); +#define MVM_TCM_PERIOD_MSEC 500 +#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000) +#define MVM_LL_PERIOD (10 * HZ) +void iwl_mvm_tcm_work(struct work_struct *work); +void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm); +void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel); +void iwl_mvm_resume_tcm(struct iwl_mvm *mvm); +void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed); + void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 5bfe5306524c..cf48517944ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -76,9 +78,7 @@ #include "fw/acpi.h" /* Default NVM size to read */ -#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) -#define IWL_MAX_NVM_SECTION_SIZE 0x1b58 -#define IWL_MAX_EXT_NVM_SECTION_SIZE 0x1ffc +#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024) #define NVM_WRITE_OPCODE 1 #define NVM_READ_OPCODE 0 @@ -229,19 +229,6 @@ static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section, return 0; } -static void iwl_mvm_nvm_fixups(struct iwl_mvm *mvm, unsigned int section, - u8 *data, unsigned int len) -{ -#define IWL_4165_DEVICE_ID 0x5501 -#define NVM_SKU_CAP_MIMO_DISABLE BIT(5) - - if (section == NVM_SECTION_TYPE_PHY_SKU && - mvm->trans->hw_id == IWL_4165_DEVICE_ID && data && len >= 5 && - (data[4] & NVM_SKU_CAP_MIMO_DISABLE)) - /* OTP 0x52 bug work around: it's a 1x1 device */ - data[3] = ANT_B | (ANT_B << 4); -} - /* * Reads an NVM section completely. * NICs prior to 7000 family doesn't have a real NVM, but just read @@ -282,7 +269,7 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section, offset += ret; } - iwl_mvm_nvm_fixups(mvm, section, data, offset); + iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset); IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM section %d read completed\n", section); @@ -355,184 +342,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) lar_enabled); } -#define MAX_NVM_FILE_LEN 16384 - -/* - * Reads external NVM from a file into mvm->nvm_sections - * - * HOW TO CREATE THE NVM FILE FORMAT: - * ------------------------------ - * 1. create hex file, format: - * 3800 -> header - * 0000 -> header - * 5a40 -> data - * - * rev - 6 bit (word1) - * len - 10 bit (word1) - * id - 4 bit (word2) - * rsv - 12 bit (word2) - * - * 2. flip 8bits with 8 bits per line to get the right NVM file format - * - * 3. create binary file from the hex file - * - * 4. save as "iNVM_xxx.bin" under /lib/firmware - */ -int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) -{ - int ret, section_size; - u16 section_id; - const struct firmware *fw_entry; - const struct { - __le16 word1; - __le16 word2; - u8 data[]; - } *file_sec; - const u8 *eof; - u8 *temp; - int max_section_size; - const __le32 *dword_buff; - -#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF)) -#define NVM_WORD2_ID(x) (x >> 12) -#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8)) -#define EXT_NVM_WORD1_ID(x) ((x) >> 4) -#define NVM_HEADER_0 (0x2A504C54) -#define NVM_HEADER_1 (0x4E564D2A) -#define NVM_HEADER_SIZE (4 * sizeof(u32)) - - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); - - /* Maximal size depends on NVM version */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) - max_section_size = IWL_MAX_NVM_SECTION_SIZE; - else - max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; - - /* - * Obtain NVM image via request_firmware. Since we already used - * request_firmware_nowait() for the firmware binary load and only - * get here after that we assume the NVM request can be satisfied - * synchronously. - */ - ret = request_firmware(&fw_entry, mvm->nvm_file_name, - mvm->trans->dev); - if (ret) { - IWL_ERR(mvm, "ERROR: %s isn't available %d\n", - mvm->nvm_file_name, ret); - return ret; - } - - IWL_INFO(mvm, "Loaded NVM file %s (%zu bytes)\n", - mvm->nvm_file_name, fw_entry->size); - - if (fw_entry->size > MAX_NVM_FILE_LEN) { - IWL_ERR(mvm, "NVM file too large\n"); - ret = -EINVAL; - goto out; - } - - eof = fw_entry->data + fw_entry->size; - dword_buff = (__le32 *)fw_entry->data; - - /* some NVM file will contain a header. 
- * The header is identified by 2 dwords header as follow: - * dword[0] = 0x2A504C54 - * dword[1] = 0x4E564D2A - * - * This header must be skipped when providing the NVM data to the FW. - */ - if (fw_entry->size > NVM_HEADER_SIZE && - dword_buff[0] == cpu_to_le32(NVM_HEADER_0) && - dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) { - file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE); - IWL_INFO(mvm, "NVM Version %08X\n", le32_to_cpu(dword_buff[2])); - IWL_INFO(mvm, "NVM Manufacturing date %08X\n", - le32_to_cpu(dword_buff[3])); - - /* nvm file validation, dword_buff[2] holds the file version */ - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 && - CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP && - le32_to_cpu(dword_buff[2]) < 0xE4A) { - ret = -EFAULT; - goto out; - } - } else { - file_sec = (void *)fw_entry->data; - } - - while (true) { - if (file_sec->data > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section header\n"); - ret = -EINVAL; - break; - } - - /* check for EOF marker */ - if (!file_sec->word1 && !file_sec->word2) { - ret = 0; - break; - } - - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { - section_size = - 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); - section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); - } else { - section_size = 2 * EXT_NVM_WORD2_LEN( - le16_to_cpu(file_sec->word2)); - section_id = EXT_NVM_WORD1_ID( - le16_to_cpu(file_sec->word1)); - } - - if (section_size > max_section_size) { - IWL_ERR(mvm, "ERROR - section too large (%d)\n", - section_size); - ret = -EINVAL; - break; - } - - if (!section_size) { - IWL_ERR(mvm, "ERROR - section empty\n"); - ret = -EINVAL; - break; - } - - if (file_sec->data + section_size > eof) { - IWL_ERR(mvm, - "ERROR - NVM file too short for section (%d bytes)\n", - section_size); - ret = -EINVAL; - break; - } - - if (WARN(section_id >= NVM_MAX_NUM_SECTIONS, - "Invalid NVM section ID %d\n", section_id)) { - ret = -EINVAL; - break; - } - - temp = kmemdup(file_sec->data, section_size, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } - - iwl_mvm_nvm_fixups(mvm, section_id, temp, section_size); - - kfree(mvm->nvm_sections[section_id].data); - mvm->nvm_sections[section_id].data = temp; - mvm->nvm_sections[section_id].length = section_size; - - /* advance to the next section */ - file_sec = (void *)(file_sec->data + section_size); - } -out: - release_firmware(fw_entry); - return ret; -} - /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) { @@ -585,7 +394,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) break; } - iwl_mvm_nvm_fixups(mvm, section, temp, ret); + iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret); mvm->nvm_sections[section].data = temp; mvm->nvm_sections[section].length = ret; @@ -624,14 +433,17 @@ int iwl_nvm_init(struct iwl_mvm *mvm) /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { /* read External NVM file from the mod param */ - ret = iwl_mvm_read_external_nvm(mvm); + ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name, + mvm->nvm_sections); if (ret) { mvm->nvm_file_name = nvm_file_C; if ((ret == -EFAULT || ret == -ENOENT) && mvm->nvm_file_name) { /* in case nvm file was failed try again */ - ret = iwl_mvm_read_external_nvm(mvm); + ret = iwl_read_external_nvm(mvm->trans, + mvm->nvm_file_name, + mvm->nvm_sections); if (ret) return ret; } else { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 
224bfa1bcf53..ff1e518096c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -250,6 +250,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC), RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC), + RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, + iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC), + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, @@ -667,6 +670,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); + spin_lock_init(&mvm->tcm.lock); + INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work); + mvm->tcm.ts = jiffies; + mvm->tcm.ll_ts = jiffies; + mvm->tcm.uapsd_nonagg_ts = jiffies; + INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); /* @@ -730,6 +739,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, sizeof(trans->dbg_conf_tlv)); trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; + trans->iml = mvm->fw->iml; + trans->iml_len = mvm->fw->iml_len; + /* set up notification wait support */ iwl_notification_wait_init(&mvm->notif_wait); @@ -859,6 +871,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) kfree(mvm->nvm_sections[i].data); + cancel_delayed_work_sync(&mvm->tcm.work); + iwl_mvm_tof_clean(mvm); mutex_destroy(&mvm->mutex); @@ -1026,8 +1040,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_queue_notif(mvm, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); - else if (cmd == WIDE_ID(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF)) - iwl_mvm_tlc_update_notif(mvm, pkt); else iwl_mvm_rx_common(mvm, rxb, pkt); } @@ -1432,6 +1444,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) mvm->d0i3_offloading = false; } + iwl_mvm_pause_tcm(mvm, true); /* make sure we have no running tx while configuring the seqno */ synchronize_net(); @@ -1615,6 +1628,7 @@ out: /* the FW might have updated the regdomain */ iwl_mvm_update_changed_regdom(mvm); + iwl_mvm_resume_tcm(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index fb5745660509..b8b2b819e8e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -65,14 +67,14 @@ static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta) { switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: - return IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ; + return IWL_TLC_MNG_CH_WIDTH_160MHZ; case IEEE80211_STA_RX_BW_80: - return IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ; + return IWL_TLC_MNG_CH_WIDTH_80MHZ; case IEEE80211_STA_RX_BW_40: - return IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ; + return IWL_TLC_MNG_CH_WIDTH_40MHZ; case IEEE80211_STA_RX_BW_20: default: - return IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ; + return IWL_TLC_MNG_CH_WIDTH_20MHZ; } } @@ -85,7 +87,9 @@ static u8 rs_fw_set_active_chains(u8 chains) if (chains & ANT_B) fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK; if (chains & ANT_C) - fw_chains |= IWL_TLC_MNG_CHAIN_C_MSK; + WARN(false, + "tlc offload doesn't support antenna C. chains: 0x%x\n", + chains); return fw_chains; } @@ -97,13 +101,13 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) u8 supp = 0; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) - supp |= IWL_TLC_MNG_SGI_20MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) - supp |= IWL_TLC_MNG_SGI_40MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80) - supp |= IWL_TLC_MNG_SGI_80MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ); if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160) - supp |= IWL_TLC_MNG_SGI_160MHZ_MSK; + supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ); return supp; } @@ -114,9 +118,7 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; bool vht_ena = vht_cap && vht_cap->vht_supported; - u16 flags = IWL_TLC_MNG_CFG_FLAGS_CCK_MSK | - IWL_TLC_MNG_CFG_FLAGS_DCM_MSK | - IWL_TLC_MNG_CFG_FLAGS_DD_MSK; + u16 flags = 0; if (mvm->cfg->ht_params->stbc && (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && @@ -129,16 +131,11 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) && - (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) && - (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) - flags |= IWL_TLC_MNG_CFG_FLAGS_BF_MSK; - return flags; } static -int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, +int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap, int nss) { u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) & @@ -160,15 +157,16 @@ int rs_fw_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, return 0; } -static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta, - struct ieee80211_sta_vht_cap *vht_cap, - struct iwl_tlc_config_cmd *cmd) +static void +rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, + const struct ieee80211_sta_vht_cap *vht_cap, + struct iwl_tlc_config_cmd *cmd) { u16 supp; int i, highest_mcs; for (i = 0; i < sta->rx_nss; i++) { - if (i == MAX_RS_ANT_NUM) + if (i == MAX_NSS) break; highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1); @@ -179,7 +177,9 @@ static void rs_fw_vht_set_enabled_rates(struct ieee80211_sta *sta, if (sta->bandwidth == IEEE80211_STA_RX_BW_20) supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9); - cmd->ht_supp_rates[i] = cpu_to_le16(supp); + cmd->ht_rates[i][0] = cpu_to_le16(supp); + if (sta->bandwidth == IEEE80211_STA_RX_BW_160) + cmd->ht_rates[i][1] = 
cmd->ht_rates[i][0]; } } @@ -190,8 +190,8 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, int i; unsigned long tmp; unsigned long supp; /* must be unsigned long for for_each_set_bit */ - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; /* non HT rates */ supp = 0; @@ -199,45 +199,40 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, for_each_set_bit(i, &tmp, BITS_PER_LONG) supp |= BIT(sband->bitrates[i].hw_value); - cmd->non_ht_supp_rates = cpu_to_le16(supp); + cmd->non_ht_rates = cpu_to_le16(supp); cmd->mode = IWL_TLC_MNG_MODE_NON_HT; - /* HT/VHT rates */ if (vht_cap && vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap && ht_cap->ht_supported) { cmd->mode = IWL_TLC_MNG_MODE_HT; - cmd->ht_supp_rates[0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); - cmd->ht_supp_rates[1] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); + cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]); + cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]); } } -static void rs_fw_tlc_mng_notif_req_config(struct iwl_mvm *mvm, u8 sta_id) -{ - u32 cmd_id = iwl_cmd_id(TLC_MNG_NOTIF_REQ_CMD, DATA_PATH_GROUP, 0); - struct iwl_tlc_notif_req_config_cmd cfg_cmd = { - .sta_id = sta_id, - .flags = cpu_to_le16(IWL_TLC_NOTIF_INIT_RATE_MSK), - .interval = cpu_to_le16(IWL_TLC_NOTIF_REQ_INTERVAL), - }; - int ret; - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); - if (ret) - IWL_ERR(mvm, "Failed to send TLC notif request (%d)\n", ret); -} - -void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { + struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_tlc_update_notif *notif; + struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; struct iwl_lq_sta_rs_fw *lq_sta; + u32 flags; rcu_read_lock(); notif = (void *)pkt->data; - mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, notif->sta_id); + sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); + if (IS_ERR_OR_NULL(sta)) { + IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", + notif->sta_id); + goto out; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); if (!mvmsta) { IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n", @@ -245,14 +240,30 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) goto out; } + flags = le32_to_cpu(notif->flags); + lq_sta = &mvmsta->lq_sta.rs_fw; - if (le16_to_cpu(notif->flags) & IWL_TLC_NOTIF_INIT_RATE_MSK) { - lq_sta->last_rate_n_flags = - le32_to_cpu(notif->values[IWL_TLC_NOTIF_INIT_RATE_POS]); + if (flags & IWL_TLC_NOTIF_FLAG_RATE) { + lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate); IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n", lq_sta->last_rate_n_flags); } + + if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) { + u16 size = le32_to_cpu(notif->amsdu_size); + + if (WARN_ON(sta->max_amsdu_len < size)) + goto out; + + mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); + mvmsta->max_amsdu_len = size; + + IWL_DEBUG_RATE(mvm, + "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", + le32_to_cpu(notif->amsdu_size), size, + mvmsta->amsdu_enabled); + } out: rcu_read_unlock(); } @@ -267,12 +278,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_supported_band *sband; struct iwl_tlc_config_cmd cfg_cmd = { .sta_id = mvmsta->sta_id, - .max_supp_ch_width = rs_fw_bw_from_sta_bw(sta), + .max_ch_width = rs_fw_bw_from_sta_bw(sta), .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)), .chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)), - .max_supp_ss = sta->rx_nss, - .max_ampdu_cnt = cpu_to_le32(mvmsta->max_agg_bufsize), + .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len), .sgi_ch_width_supp = rs_fw_sgi_cw_support(sta), + .amsdu = iwl_mvm_is_csum_supported(mvm), }; int ret; @@ -287,8 +298,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd); if (ret) IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret); - - rs_fw_tlc_mng_notif_req_config(mvm, cfg_cmd.sta_id); } int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 5d776ec1840f..642da10b0b7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3,6 +3,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -13,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -651,9 +648,10 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, } tid_data = &mvmsta->tid_data[tid]; - if ((tid_data->state == IWL_AGG_OFF) && + if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED && + tid_data->state == IWL_AGG_OFF && (lq_sta->tx_agg_tid_en & BIT(tid)) && - (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) { IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) tid_data->state = IWL_AGG_QUEUED; @@ -1257,7 +1255,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); - iwl_mvm_rs_rate_init(mvm, sta, info->band, false); + iwl_mvm_rs_rate_init(mvm, sta, info->band); return; } lq_sta->last_tx = jiffies; @@ -1715,12 +1713,18 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + /* + * In case TLC offload is not active amsdu_enabled is either 0xFFFF + * or 0, since there is no per-TID alg. 
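+ * With TLC offload the firmware instead reports a per-TID A-MSDU
+ * bitmap in the TLC_MNG_UPDATE notification, which
+ * iwl_mvm_tlc_update_notif() stores into amsdu_enabled as-is.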
+ */ if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || tbl->rate.index < IWL_RATE_MCS_5_INDEX || scale_action == RS_ACTION_DOWNSCALE) - mvmsta->tlc_amsdu = false; + mvmsta->amsdu_enabled = 0; else - mvmsta->tlc_amsdu = true; + mvmsta->amsdu_enabled = 0xFFFF; + + mvmsta->max_amsdu_len = sta->max_amsdu_len; } /* @@ -2684,9 +2688,9 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, enum nl80211_band band, - struct rs_rate *rate, - bool init) + struct rs_rate *rate) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int i, nentries; unsigned long active_rate; s8 best_rssi = S8_MIN; @@ -2748,7 +2752,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, * bandwidth rate, and after authorization, when the phy context * is already up-to-date, re-init rs with the correct bw. */ - u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); + u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ? + RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta); switch (bw) { case RATE_MCS_CHAN_WIDTH_40: @@ -2833,9 +2838,9 @@ void rs_update_last_rssi(struct iwl_mvm *mvm, static void rs_initialize_lq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - enum nl80211_band band, - bool init) + enum nl80211_band band) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_scale_tbl_info *tbl; struct rs_rate *rate; u8 active_tbl = 0; @@ -2851,7 +2856,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, tbl = &(lq_sta->lq_info[active_tbl]); rate = &tbl->rate; - rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init); + rs_get_initial_rate(mvm, sta, lq_sta, band, rate); rs_init_optimal_rate(mvm, sta, lq_sta); WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, @@ -2864,7 +2869,8 @@ static void rs_initialize_lq(struct iwl_mvm *mvm, rs_set_expected_tpt_table(lq_sta, tbl); rs_fill_lq_cmd(mvm, sta, lq_sta, rate); /* TODO restore station should remember the lq cmd */ - iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, init); + iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED); } static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta, @@ -3117,7 +3123,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) * Called after adding a new station to initialize rate scaling */ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init) + enum nl80211_band band) { int i, j; struct ieee80211_hw *hw = mvm->hw; @@ -3134,7 +3140,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; lq_sta->lq.sta_id = mvmsta->sta_id; - mvmsta->tlc_amsdu = false; + mvmsta->amsdu_enabled = 0; + mvmsta->max_amsdu_len = sta->max_amsdu_len; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3196,7 +3203,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm); #endif - rs_initialize_lq(mvm, sta, lq_sta, band, init); + rs_initialize_lq(mvm, sta, lq_sta, band); } static void rs_drv_rate_update(void *mvm_r, @@ -3216,7 +3223,7 @@ static void rs_drv_rate_update(void *mvm_r, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) ieee80211_stop_tx_ba_session(sta, tid); - iwl_mvm_rs_rate_init(mvm, sta, sband->band, false); + iwl_mvm_rs_rate_init(mvm, sta, sband->band); } #ifdef CONFIG_MAC80211_DEBUGFS @@ -3744,7 +3751,7 @@ static ssize_t 
rs_sta_dbgfs_scale_table_read(struct file *file, (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", (lq_sta->is_agg) ? "AGG on" : "", - (mvmsta->tlc_amsdu) ? "AMSDU on" : ""); + (mvmsta->amsdu_enabled) ? "AMSDU on" : ""); } desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); @@ -4062,12 +4069,12 @@ static const struct rate_control_ops rs_mvm_ops_drv = { }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init) + enum nl80211_band band) { if (iwl_mvm_has_tlc_offload(mvm)) rs_fw_rate_init(mvm, sta, band); else - rs_drv_rate_init(mvm, sta, band, init); + rs_drv_rate_init(mvm, sta, band); } int iwl_mvm_rate_control_register(void) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index fb18cb8c233d..cffb8c852934 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -13,10 +14,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA - * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * @@ -410,7 +407,7 @@ struct iwl_lq_sta { /* Initialize station's rate scaling information after adding station */ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - enum nl80211_band band, bool init); + enum nl80211_band band); /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, @@ -454,5 +451,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum nl80211_band band); int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, bool enable); -void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt); +void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); #endif /* __rs__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index d26833c5ce1f..bfb163419c67 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -254,6 +254,74 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, return 0; } +static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, u32 len, + struct iwl_rx_phy_info *phy_info, + u32 rate_n_flags) +{ + struct iwl_mvm_sta *mvmsta; + struct iwl_mvm_tcm_mac *mdata; + int mac; + int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */ + struct iwl_mvm_vif *mvmvif; + /* expected throughput in 100Kbps, single stream, 20 MHz */ + static const u8 thresh_tpt[] = { + 9, 18, 30, 42, 60, 78, 90, 96, 120, 135, + }; + u16 thr; + + if (ieee80211_is_data_qos(hdr->frame_control)) + ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)]; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + 
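+ /*
+  * thresh_tpt[] above is in 100 kbps units for a single spatial
+  * stream at 20 MHz; the code further down multiplies it by the
+  * number of streams and shifts left once per bandwidth doubling.
+  * For example, two-stream HT MCS 15 (rate code 7) at 40 MHz gives
+  * 96 * 2 << 1 = 384, i.e. an expected ~38.4 Mbps.
+  */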
+ if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + mdata = &mvm->tcm.data[mac]; + mdata->rx.pkts[ac]++; + + /* count the airtime only once for each ampdu */ + if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) { + mdata->rx.last_ampdu_ref = mvm->ampdu_ref; + mdata->rx.airtime += le16_to_cpu(phy_info->frame_time); + } + + if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK))) + return; + + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + + if (mdata->opened_rx_ba_sessions || + mdata->uapsd_nonagg_detect.detected || + (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) || + mvmsta->sta_id != mvmvif->ap_sta_id) + return; + + if (rate_n_flags & RATE_MCS_HT_MSK) { + thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK]; + thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >> + RATE_HT_MCS_NSS_POS); + } else { + if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >= + ARRAY_SIZE(thresh_tpt))) + return; + thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK]; + thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS); + } + + thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >> + RATE_MCS_CHAN_WIDTH_POS); + + mdata->uapsd_nonagg_detect.rx_bytes += len; + ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr); +} + static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, struct sk_buff *skb, u32 status) @@ -408,6 +476,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, NULL); } + if (!mvm->tcm.paused && len >= sizeof(*hdr) && + !is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_data(hdr->frame_control)) + iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info, + rate_n_flags); + if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, rx_pkt_status); } @@ -654,7 +728,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, int expected_size; int i; u8 *energy; - __le32 *bytes, *air_time; + __le32 *bytes; + __le32 *air_time; __le32 flags; if (!iwl_mvm_has_new_rx_stats_api(mvm)) { @@ -752,6 +827,32 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, sta->avg_energy = energy[i]; } rcu_read_unlock(); + + /* + * Don't update in case the statistics are not cleared, since + * we will end up counting twice the same airtime, once in TCM + * request and once in statistics notification. 
+ */ + if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)) + return; + + spin_lock(&mvm->tcm.lock); + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i]; + u32 airtime = le32_to_cpu(air_time[i]); + u32 rx_bytes = le32_to_cpu(bytes[i]); + + mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes; + if (airtime) { + /* re-init every time to store rate from FW */ + ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); + ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, + rx_bytes * 8 / airtime); + } + + mdata->rx.airtime += airtime; + } + spin_unlock(&mvm->tcm.lock); } void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 4a4ccfd11e5b..129c4c09648d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -112,7 +112,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, return -1; if (ieee80211_is_data_qos(hdr->frame_control)) - tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); else tid = 0; @@ -151,17 +151,9 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + len -= 2; pad_len = 2; - - /* - * If the device inserted padding it means that (it thought) - * the 802.11 header wasn't a multiple of 4 bytes long. In - * this case, reserve two bytes at the start of the SKB to - * align the payload properly in case we end up copying it. - */ - skb_reserve(skb, pad_len); } - len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -235,12 +227,24 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, - struct ieee80211_rx_status *stats, - struct iwl_rx_mpdu_desc *desc, u32 pkt_flags, - int queue, u8 *crypt_len) + struct ieee80211_rx_status *stats, u16 phy_info, + struct iwl_rx_mpdu_desc *desc, + u32 pkt_flags, int queue, u8 *crypt_len) { u16 status = le16_to_cpu(desc->status); + /* + * Drop UNKNOWN frames in aggregation, unless in monitor mode + * (where we don't have the keys). + * We limit this to aggregation because in TKIP this is a valid + * scenario, since we may not have the (correct) TTAK (phase 1 + * key) in the firmware. 
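+ * Such frames still reach mac80211 with the decryption flags
+ * unset and can be decrypted in software there; restricting the
+ * drop to A-MPDUs should be safe since TKIP is not allowed
+ * together with HT/VHT aggregation anyway.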
+ */ + if (phy_info & IWL_RX_MPDU_PHY_AMPDU && + (status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on) + return -1; + if (!ieee80211_has_protected(hdr->frame_control) || (status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE) @@ -347,8 +351,7 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, if (ieee80211_is_data_qos(hdr->frame_control)) /* frame has qos control */ - tid = *ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); else tid = IWL_MAX_TID_COUNT; @@ -587,14 +590,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, notif = (void *)pkt->data; internal_notif = (void *)notif->payload; - if (internal_notif->sync) { - if (mvm->queue_sync_cookie != internal_notif->cookie) { - WARN_ONCE(1, - "Received expired RX queue sync message\n"); - return; - } - if (!atomic_dec_return(&mvm->queue_sync_counter)) - wake_up(&mvm->rx_sync_waitq); + if (internal_notif->sync && + mvm->queue_sync_cookie != internal_notif->cookie) { + WARN_ONCE(1, "Received expired RX queue sync message\n"); + return; } switch (internal_notif->type) { @@ -606,6 +605,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, default: WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); } + + if (internal_notif->sync && + !atomic_dec_return(&mvm->queue_sync_counter)) + wake_up(&mvm->rx_sync_waitq); } /* @@ -628,7 +631,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; bool last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; - u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + u8 tid = ieee80211_get_tid(hdr); u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; struct iwl_mvm_reorder_buf_entry *entries; @@ -867,9 +870,19 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, return; } + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + /* + * If the device inserted padding it means that (it thought) + * the 802.11 header wasn't a multiple of 4 bytes long. In + * this case, reserve two bytes at the start of the SKB to + * align the payload properly in case we end up copying it. + */ + skb_reserve(skb, 2); + } + rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, desc, + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { kfree_skb(skb); @@ -941,6 +954,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); + if (!mvm->tcm.paused && len >= sizeof(*hdr) && + !is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_data(hdr->frame_control) && + time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + /* * We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index b31f0ffbbbf0..4b3753d78d03 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,9 +20,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -76,12 +75,6 @@ #define IWL_DENSE_EBS_SCAN_RATIO 5 #define IWL_SPARSE_EBS_SCAN_RATIO 1 -enum iwl_mvm_traffic_load { - IWL_MVM_TRAFFIC_LOW, - IWL_MVM_TRAFFIC_MEDIUM, - IWL_MVM_TRAFFIC_HIGH, -}; - #define IWL_SCAN_DWELL_ACTIVE 10 #define IWL_SCAN_DWELL_PASSIVE 110 #define IWL_SCAN_DWELL_FRAGMENTED 44 @@ -123,7 +116,9 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = { }; struct iwl_mvm_scan_params { + /* For CDB this is low band scan type, for non-CDB - type. */ enum iwl_mvm_scan_type type; + enum iwl_mvm_scan_type hb_type; u32 n_channels; u16 delay; int n_ssids; @@ -152,7 +147,7 @@ static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return (void *)&cmd->v7.data; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) return (void *)&cmd->v6.data; return (void *)&cmd->v1.data; @@ -169,7 +164,7 @@ iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm) if (iwl_mvm_is_adaptive_dwell_supported(mvm)) return &cmd->v7.channel; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) return &cmd->v6.channel; return &cmd->v1.channel; @@ -234,15 +229,21 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac, static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm) { - return IWL_MVM_TRAFFIC_LOW; + return mvm->tcm.result.global_load; +} + +static enum iwl_mvm_traffic_load +iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band) +{ + return mvm->tcm.result.band_load[band]; } static enum -iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) +iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device, + enum iwl_mvm_traffic_load load, + bool low_latency) { int global_cnt = 0; - enum iwl_mvm_traffic_load load; - bool low_latency; ieee80211_iterate_active_interfaces_atomic(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -251,9 +252,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) if (!global_cnt) return IWL_SCAN_TYPE_UNASSOC; - load = iwl_mvm_get_traffic_load(mvm); - low_latency = iwl_mvm_low_latency(mvm); - if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) return IWL_SCAN_TYPE_FRAGMENTED; @@ -264,25 +262,57 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) return IWL_SCAN_TYPE_WILD; } +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device) +{ + enum iwl_mvm_traffic_load load; + bool low_latency; + + load = iwl_mvm_get_traffic_load(mvm); + low_latency = iwl_mvm_low_latency(mvm); + + return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); +} + +static enum +iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm, + bool 
p2p_device, + enum nl80211_band band) +{ + enum iwl_mvm_traffic_load load; + bool low_latency; + + load = iwl_mvm_get_traffic_load_band(mvm, band); + low_latency = iwl_mvm_low_latency_band(mvm, band); + + return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency); +} + static int iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm, struct cfg80211_scan_request *req, struct iwl_mvm_scan_params *params) { + u32 duration = scan_timing[params->type].max_out_time; + if (!req->duration) return 0; - if (req->duration_mandatory && - req->duration > scan_timing[params->type].max_out_time) { + if (iwl_mvm_is_cdb_supported(mvm)) { + u32 hb_time = scan_timing[params->hb_type].max_out_time; + + duration = min_t(u32, duration, hb_time); + } + + if (req->duration_mandatory && req->duration > duration) { IWL_DEBUG_SCAN(mvm, "Measurement scan - too long dwell %hu (max out time %u)\n", req->duration, - scan_timing[params->type].max_out_time); + duration); return -EOPNOTSUPP; } - return min_t(u32, (u32)req->duration, - scan_timing[params->type].max_out_time); + return min_t(u32, (u32)req->duration, duration); } static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm) @@ -437,6 +467,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, ieee80211_scan_completed(mvm->hw, &info); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); + iwl_mvm_resume_tcm(mvm); } else { IWL_ERR(mvm, "got scan complete notification but no scan is running\n"); @@ -1030,22 +1061,38 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config, static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config, u32 flags, u8 channel_flags) { - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); struct iwl_scan_config *cfg = config; cfg->flags = cpu_to_le32(flags); cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm)); cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm); - cfg->out_of_channel_time[0] = - cpu_to_le32(scan_timing[type].max_out_time); - cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - cfg->suspend_time[1] = - cpu_to_le32(scan_timing[type].suspend_time); - cfg->out_of_channel_time[1] = + enum iwl_mvm_scan_type lb_type, hb_type; + + lb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_2GHZ); + hb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_5GHZ); + + cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[lb_type].max_out_time); + cfg->suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[lb_type].suspend_time); + + cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(scan_timing[hb_type].max_out_time); + cfg->suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(scan_timing[hb_type].suspend_time); + } else { + enum iwl_mvm_scan_type type = + iwl_mvm_get_scan_type(mvm, false); + + cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(scan_timing[type].max_out_time); + cfg->suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(scan_timing[type].suspend_time); } iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell); @@ -1065,7 +1112,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) struct iwl_host_cmd cmd = { .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0), }; - enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false); + enum iwl_mvm_scan_type type; + enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET; int num_channels = mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels + 
mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels; @@ -1075,10 +1123,20 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) - return 0; + if (iwl_mvm_is_cdb_supported(mvm)) { + type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_2GHZ); + hb_type = iwl_mvm_get_scan_type_band(mvm, false, + NL80211_BAND_5GHZ); + if (type == mvm->scan_type && hb_type == mvm->hb_scan_type) + return 0; + } else { + type = iwl_mvm_get_scan_type(mvm, false); + if (type == mvm->scan_type) + return 0; + } - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_cdb_scan_api(mvm)) cmd_size = sizeof(struct iwl_scan_config); else cmd_size = sizeof(struct iwl_scan_config_v1); @@ -1107,10 +1165,15 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_CHANNEL_FLAG_EBS_ADD | IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE; - if (iwl_mvm_has_new_tx_api(mvm)) { - flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ? - SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : - SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; + /* + * Check for fragmented scan on LMAC2 - high band. + * LMAC1 - low band is checked above. + */ + if (iwl_mvm_cdb_scan_api(mvm)) { + if (iwl_mvm_is_cdb_supported(mvm)) + flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ? + SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED : + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED; iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags); } else { iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags); @@ -1123,8 +1186,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n"); ret = iwl_mvm_send_cmd(mvm, &cmd); - if (!ret) + if (!ret) { mvm->scan_type = type; + mvm->hb_scan_type = hb_type; + } kfree(cfg); return ret; @@ -1178,7 +1243,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - hb_timing = &scan_timing[params->type]; + hb_timing = &scan_timing[params->hb_type]; cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); @@ -1208,7 +1273,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_is_cdb_supported(mvm)) { - hb_timing = &scan_timing[params->type]; + hb_timing = &scan_timing[params->hb_type]; cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(hb_timing->max_out_time); @@ -1216,7 +1281,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(hb_timing->suspend_time); } - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_cdb_scan_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = @@ -1232,6 +1297,11 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cpu_to_le32(timing->suspend_time); } } + + if (iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1262,11 +1332,12 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0) flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT; - if (params->type == IWL_SCAN_TYPE_FRAGMENTED) { + if (params->type == IWL_SCAN_TYPE_FRAGMENTED) flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED; - if (iwl_mvm_is_cdb_supported(mvm)) - flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; - } + + if (iwl_mvm_is_cdb_supported(mvm) && + params->hb_type == IWL_SCAN_TYPE_FRAGMENTED) + flags |= 
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED; if (iwl_mvm_rrm_scan_needed(mvm) && fw_has_capa(&mvm->fw->ucode_capa, @@ -1497,6 +1568,21 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work) iwl_force_nmi(mvm->trans); } +static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm, + struct iwl_mvm_scan_params *params, + bool p2p) +{ + if (iwl_mvm_is_cdb_supported(mvm)) { + params->type = + iwl_mvm_get_scan_type_band(mvm, p2p, + NL80211_BAND_2GHZ); + params->hb_type = + iwl_mvm_get_scan_type_band(mvm, p2p, + NL80211_BAND_5GHZ); + } else { + params->type = iwl_mvm_get_scan_type(mvm, p2p); + } +} int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_scan_request *req, struct ieee80211_scan_ies *ies) @@ -1544,9 +1630,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, params.scan_plans = &scan_plan; params.n_scan_plans = 1; - params.type = - iwl_mvm_get_scan_type(mvm, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, + vif->type == NL80211_IFTYPE_P2P_DEVICE); ret = iwl_mvm_get_measurement_dwell(mvm, req, ¶ms); if (ret < 0) @@ -1568,6 +1653,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (ret) return ret; + iwl_mvm_pause_tcm(mvm, false); + ret = iwl_mvm_send_cmd(mvm, &hcmd); if (ret) { /* If the scan failed, it usually means that the FW was unable @@ -1575,6 +1662,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, * should try to send the command again with different params. */ IWL_ERR(mvm, "Scan failed! ret %d\n", ret); + iwl_mvm_resume_tcm(mvm); return ret; } @@ -1638,9 +1726,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, params.n_scan_plans = req->n_scan_plans; params.scan_plans = req->scan_plans; - params.type = - iwl_mvm_get_scan_type(mvm, - vif->type == NL80211_IFTYPE_P2P_DEVICE); + iwl_mvm_fill_scan_type(mvm, ¶ms, + vif->type == NL80211_IFTYPE_P2P_DEVICE); /* In theory, LMAC scans can handle a 32-bit delay, but since * waiting for over 18 hours to start the scan is a bit silly @@ -1711,6 +1798,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_vif = NULL; iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); cancel_delayed_work(&mvm->scan_timeout_dwork); + iwl_mvm_resume_tcm(mvm); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; @@ -1827,7 +1915,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) base_size = IWL_SCAN_REQ_UMAC_SIZE_V8; else if (iwl_mvm_is_adaptive_dwell_supported(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; - else if (iwl_mvm_has_new_tx_api(mvm)) + else if (iwl_mvm_cdb_scan_api(mvm)) base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 80067eb9ea05..9263b9aa8b72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -214,7 +216,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); add_sta_cmd.station_flags |= cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); - if (mvm_sta->associated) + if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC) add_sta_cmd.assoc_id = cpu_to_le16(sta->aid); if (sta->wme) { @@ -2466,6 +2468,15 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); + if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && + iwl_mvm_has_new_tx_api(mvm)) { + u8 ac = tid_to_mac80211_ac[tid]; + + ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); + if (ret) + return ret; + } + spin_lock_bh(&mvmsta->lock); /* possible race condition - we entered D0i3 while starting agg */ @@ -2887,7 +2898,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, u32 sta_id, struct ieee80211_key_conf *key, bool mcast, u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, - u8 key_offset) + u8 key_offset, bool mfp) { union { struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; @@ -2960,6 +2971,8 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, if (mcast) key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + if (mfp) + key_flags |= cpu_to_le16(STA_KEY_MFP); u.cmd.common.key_offset = key_offset; u.cmd.common.key_flags = key_flags; @@ -3101,11 +3114,13 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_key_seq seq; u16 p1k[5]; u32 sta_id; + bool mfp = false; if (sta) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); sta_id = mvm_sta->sta_id; + mfp = sta->mfp; } else if (vif->type == NL80211_IFTYPE_AP && !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -3127,7 +3142,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, ieee80211_get_key_rx_seq(keyconf, 0, &seq); ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - seq.tkip.iv32, p1k, 0, key_offset); + seq.tkip.iv32, p1k, 0, key_offset, + mfp); break; case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: @@ -3135,11 +3151,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset); + 0, NULL, 0, key_offset, mfp); break; default: ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, - 0, NULL, 0, key_offset); + 0, NULL, 0, key_offset, mfp); } return ret; @@ -3366,6 +3382,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvm_sta; bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); + bool mfp = sta ? 
sta->mfp : false; rcu_read_lock(); @@ -3373,7 +3390,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, if (WARN_ON_ONCE(!mvm_sta)) goto unlock; iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, - iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); + iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, + mfp); unlock: rcu_read_unlock(); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 5ffd6adbc383..1c43ea8dd8cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,11 +19,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * * The full GNU General Public License is included in this distribution * in the file called COPYING. * @@ -35,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -376,6 +373,7 @@ struct iwl_mvm_rxq_dup_data { * tid. * @max_agg_bufsize: the maximal size of the AGG buffer for this station * @sta_type: station type + * @sta_state: station state according to enum %ieee80211_sta_state * @bt_reduced_txpower: is reduced tx power enabled for this station * @next_status_eosp: the next reclaimed packet is a PS-Poll response and * we need to signal the EOSP @@ -391,7 +389,9 @@ struct iwl_mvm_rxq_dup_data { * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? - * @tlc_amsdu: true if A-MSDU is allowed + * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs. + * In case TLC offload is not active it is either 0xFFFF or 0. + * @max_amsdu_len: max AMSDU length * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. 
This is useful in case the queue @@ -414,6 +414,7 @@ struct iwl_mvm_sta { u16 tid_disable_agg; u8 max_agg_bufsize; enum iwl_sta_type sta_type; + enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; bool next_status_eosp; spinlock_t lock; @@ -436,9 +437,9 @@ struct iwl_mvm_sta { bool tt_tx_protection; bool disable_tx; - bool tlc_amsdu; + u16 amsdu_enabled; + u16 max_amsdu_len; bool sleeping; - bool associated; u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index 3d97436bbdf5..67f360c0d17e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -7,6 +7,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program. * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2014 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -188,8 +188,14 @@ void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (tdls_sta_cnt == 1 && sta_added) iwl_mvm_power_update_mac(mvm); - /* configure the FW with TDLS peer info */ - iwl_mvm_tdls_config(mvm, vif); + /* Configure the FW with TDLS peer info only if TDLS channel switch + * capability is set. + * TDLS config data is used currently only in TDLS channel switch code. 
+ * Supposed to also serve TDLS buffer stations, which are not implemented + * yet in FW. */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) + iwl_mvm_tdls_config(mvm, vif); /* when the last peer leaves, send a power update last */ if (tdls_sta_cnt == 0 && !sta_added) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 795065974d78..cf2591f2ac23 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -767,16 +767,16 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 snap_ip_tcp, pad; unsigned int dbg_max_amsdu_len; netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG; - u8 *qc, tid, txf; + u8 tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); - if (!sta->max_amsdu_len || + if (!mvmsta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || - (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len) + (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len)) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); /* @@ -790,8 +790,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); } - qc = ieee80211_get_qos_ctl(hdr); - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) return -EINVAL; @@ -803,7 +802,11 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); - max_amsdu_len = sta->max_amsdu_len; + if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) || + !(mvmsta->amsdu_enabled & BIT(tid))) + return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); + + max_amsdu_len = mvmsta->max_amsdu_len; /* the Tx FIFO to which this A-MSDU will be routed */ txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); @@ -840,8 +843,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * N * subf_len + (N - 1) * pad. 
*/ num_subframes = (max_amsdu_len + pad) / (subf_len + pad); - if (num_subframes > 1) - *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + + if (sta->max_amsdu_subframes && + num_subframes > sta->max_amsdu_subframes) + num_subframes = sta->max_amsdu_subframes; tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; @@ -852,10 +857,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * 1 more for each fragment * 1 more for the potential data in the header */ - num_subframes = - min_t(unsigned int, num_subframes, - (mvm->trans->max_skb_frags - 1 - - skb_shinfo(skb)->nr_frags) / 2); + if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > + mvm->trans->max_skb_frags) + num_subframes = 1; + + if (num_subframes > 1) + *ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; /* This skb fits in one single A-MSDU */ if (num_subframes * mss >= tcp_payload_len) { @@ -930,6 +937,32 @@ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) return false; } +static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + int airtime) +{ + int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + if (mvm->tcm.paused) + return; + + if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD)) + schedule_delayed_work(&mvm->tcm.work, 0); + + mdata->tx.airtime += airtime; +} + +static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int tid) +{ + u32 ac = tid_to_mac80211_ac[tid]; + int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + mdata->tx.pkts[ac]++; +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -976,9 +1009,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, * assignment of MGMT TID */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { - u8 *qc = NULL; - qc = ieee80211_get_qos_ctl(hdr); - tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + tid = ieee80211_get_tid(hdr); if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) goto drop_unlock_sta; @@ -1067,6 +1098,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); + iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 
0 : tid); + return 0; drop_unlock_sta: @@ -1469,6 +1502,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, if (!IS_ERR(sta)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); + iwl_mvm_tx_airtime(mvm, mvmsta, + le16_to_cpu(tx_resp->wireless_media_time)); + if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1610,6 +1646,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, le16_to_cpu(tx_resp->wireless_media_time); mvmsta->tid_data[tid].lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); + iwl_mvm_tx_airtime(mvm, mvmsta, + le16_to_cpu(tx_resp->wireless_media_time)); } rcu_read_unlock(); @@ -1800,6 +1838,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) le32_to_cpu(ba_res->tx_rate)); } + iwl_mvm_tx_airtime(mvm, mvmsta, + le32_to_cpu(ba_res->wireless_time)); out_unlock: rcu_read_unlock(); out: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d99d9ea78e4c..b002a7afb5f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -278,8 +280,8 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) u8 ind = last_idx; int i; - for (i = 0; i < MAX_RS_ANT_NUM; i++) { - ind = (ind + 1) % MAX_RS_ANT_NUM; + for (i = 0; i < MAX_ANT_NUM; i++) { + ind = (ind + 1) % MAX_ANT_NUM; if (valid & BIT(ind)) return ind; } @@ -520,15 +522,15 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) /* set INIT_DONE flag */ iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* and wait for clock stabilization */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) udelay(2); err = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (err < 0) { IWL_DEBUG_INFO(trans, @@ -728,12 +730,14 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, .sta_id = sta_id, .tid = tid, }; - int queue; + int queue, size = IWL_DEFAULT_QUEUE_SIZE; - if (cmd.tid == IWL_MAX_TID_COUNT) + if (cmd.tid == IWL_MAX_TID_COUNT) { cmd.tid = IWL_MGMT_TID; + size = IWL_MGMT_QUEUE_SIZE; + } queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd, - SCD_QUEUE_CFG, timeout); + SCD_QUEUE_CFG, size, timeout); if (queue < 0) { IWL_DEBUG_TX_QUEUES(mvm, @@ -1074,23 +1078,48 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return iwl_mvm_power_update_mac(mvm); } +struct iwl_mvm_low_latency_iter { + bool result; + bool result_per_band[NUM_NL80211_BANDS]; +}; + static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { - bool *result = _data; + struct iwl_mvm_low_latency_iter *result = _data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + enum nl80211_band band; - if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif))) - *result = true; + if (iwl_mvm_vif_low_latency(mvmvif)) { + result->result = true; + + if (!mvmvif->phy_ctxt) + return; + + band = mvmvif->phy_ctxt->channel->band; + result->result_per_band[band] = true; + } } bool iwl_mvm_low_latency(struct iwl_mvm *mvm) { - bool result = false; + struct iwl_mvm_low_latency_iter data = {}; ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_ll_iter, &result); + iwl_mvm_ll_iter, &data); - return result; + return data.result; +} + +bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band) +{ + struct iwl_mvm_low_latency_iter data = {}; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_ll_iter, &data); + + return data.result_per_band[band]; } struct iwl_bss_iter_data { @@ -1429,6 +1458,387 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, sta->addr, tid); } +u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed) +{ + if (!elapsed) + return 0; + + return (100 * airtime / elapsed) / USEC_PER_MSEC; +} + +static enum iwl_mvm_traffic_load +iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed) +{ + u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed); + + if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH) + return IWL_MVM_TRAFFIC_HIGH; + if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH) + return IWL_MVM_TRAFFIC_MEDIUM; + + return IWL_MVM_TRAFFIC_LOW; +} + +struct iwl_mvm_tcm_iter_data { + struct iwl_mvm *mvm; + bool any_sent; +}; + +static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct 
ieee80211_vif *vif) +{ + struct iwl_mvm_tcm_iter_data *data = _data; + struct iwl_mvm *mvm = data->mvm; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC; + + if (mvmvif->id >= NUM_MAC_INDEX_DRIVER) + return; + + low_latency = mvm->tcm.result.low_latency[mvmvif->id]; + + if (!mvm->tcm.result.change[mvmvif->id] && + prev == low_latency) { + iwl_mvm_update_quotas(mvm, false, NULL); + return; + } + + if (prev != low_latency) { + /* this sends traffic load and updates quota as well */ + iwl_mvm_update_low_latency(mvm, vif, low_latency, + LOW_LATENCY_TRAFFIC); + } else { + iwl_mvm_update_quotas(mvm, false, NULL); + } + + data->any_sent = true; +} + +static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) +{ + struct iwl_mvm_tcm_iter_data data = { + .mvm = mvm, + .any_sent = false, + }; + + mutex_lock(&mvm->mutex); + + ieee80211_iterate_active_interfaces( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tcm_iter, &data); + + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) + iwl_mvm_config_scan(mvm); + + mutex_unlock(&mvm->mutex); +} + +static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) +{ + struct iwl_mvm *mvm; + struct iwl_mvm_vif *mvmvif; + struct ieee80211_vif *vif; + + mvmvif = container_of(wk, struct iwl_mvm_vif, + uapsd_nonagg_detected_wk.work); + vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); + mvm = mvmvif->mvm; + + if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions) + return; + + /* remember that this AP is broken */ + memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr, + vif->bss_conf.bssid, ETH_ALEN); + mvm->uapsd_noagg_bssid_write_idx++; + if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN) + mvm->uapsd_noagg_bssid_write_idx = 0; + + iwl_mvm_connection_loss(mvm, vif, + "AP isn't using AMPDU with uAPSD enabled"); +} + +static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + int *mac_id = data; + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (mvmvif->id != *mac_id) + return; + + if (!vif->bss_conf.assoc) + return; + + if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd && + !mvmvif->queue_params[IEEE80211_AC_VI].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BE].uapsd && + !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) + return; + + if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected) + return; + + mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true; + IWL_INFO(mvm, + "detected AP should do aggregation but isn't, likely due to U-APSD\n"); + schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); +} + +static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, + unsigned int elapsed, + int mac) +{ + u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; + u64 tpt; + unsigned long rate; + + rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); + + if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions || + mvm->tcm.data[mac].uapsd_nonagg_detect.detected) + return; + + if (iwl_mvm_has_new_rx_api(mvm)) { + tpt = 8 * bytes; /* kbps */ + do_div(tpt, elapsed); + rate *= 1000; /* kbps */ + if (tpt < 22 * rate / 100) + return; + } else { + /* + * the rate here is actually the threshold, in 100Kbps units, + * so do the needed conversion from bytes to 100Kbps: + * 100kb = bits / (100 * 1000), + * 100kbps = 100kb / 
(msecs / 1000) == + * (bits / (100 * 1000)) / (msecs / 1000) == + * bits / (100 * msecs) + */ + tpt = (8 * bytes); + do_div(tpt, elapsed * 100); + if (tpt < rate) + return; + } + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_uapsd_agg_disconnect_iter, &mac); +} + +static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u32 *band = _data; + + if (!mvmvif->phy_ctxt) + return; + + band[mvmvif->id] = mvmvif->phy_ctxt->channel->band; +} + +static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm, + unsigned long ts, + bool handle_uapsd) +{ + unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts); + unsigned int uapsd_elapsed = + jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts); + u32 total_airtime = 0; + u32 band_airtime[NUM_NL80211_BANDS] = {0}; + u32 band[NUM_MAC_INDEX_DRIVER] = {0}; + int ac, mac, i; + bool low_latency = false; + enum iwl_mvm_traffic_load load, band_load; + bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD); + + if (handle_ll) + mvm->tcm.ll_ts = ts; + if (handle_uapsd) + mvm->tcm.uapsd_nonagg_ts = ts; + + mvm->tcm.result.elapsed = elapsed; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_tcm_iterator, + &band); + + for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + u32 vo_vi_pkts = 0; + u32 airtime = mdata->rx.airtime + mdata->tx.airtime; + + total_airtime += airtime; + band_airtime[band[mac]] += airtime; + + load = iwl_mvm_tcm_load(mvm, airtime, elapsed); + mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac]; + mvm->tcm.result.load[mac] = load; + mvm->tcm.result.airtime[mac] = airtime; + + for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++) + vo_vi_pkts += mdata->rx.pkts[ac] + + mdata->tx.pkts[ac]; + + /* enable immediately with enough packets but defer disabling */ + if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH) + mvm->tcm.result.low_latency[mac] = true; + else if (handle_ll) + mvm->tcm.result.low_latency[mac] = false; + + if (handle_ll) { + /* clear old data */ + memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); + memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); + } + low_latency |= mvm->tcm.result.low_latency[mac]; + + if (!mvm->tcm.result.low_latency[mac] && handle_uapsd) + iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed, + mac); + /* clear old data */ + if (handle_uapsd) + mdata->uapsd_nonagg_detect.rx_bytes = 0; + memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); + memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + } + + load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed); + mvm->tcm.result.global_change = load != mvm->tcm.result.global_load; + mvm->tcm.result.global_load = load; + + for (i = 0; i < NUM_NL80211_BANDS; i++) { + band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed); + mvm->tcm.result.band_load[i] = band_load; + } + + /* + * If the current load isn't low we need to force re-evaluation + * in the TCM period, so that we can return to low load if there + * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get + * triggered by traffic). + */ + if (load != IWL_MVM_TRAFFIC_LOW) + return MVM_TCM_PERIOD; + /* + * If low-latency is active we need to force re-evaluation after + * (the longer) MVM_LL_PERIOD, so that we can disable low-latency + * when there's no traffic at all. 
+ */ + if (low_latency) + return MVM_LL_PERIOD; + /* + * Otherwise, we don't need to run the work struct because we're + * in the default "idle" state - traffic indication is low (which + * also covers the "no traffic" case) and low-latency is disabled + * so there's no state that may need to be disabled when there's + * no traffic at all. + * + * Note that this has no impact on the regular scheduling of the + * updates triggered by traffic - those happen whenever one of the + * two timeouts expire (if there's traffic at all.) + */ + return 0; +} + +void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) +{ + unsigned long ts = jiffies; + bool handle_uapsd = + time_after(ts, mvm->tcm.uapsd_nonagg_ts + + msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD)); + + spin_lock(&mvm->tcm.lock); + if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { + spin_unlock(&mvm->tcm.lock); + return; + } + spin_unlock(&mvm->tcm.lock); + + if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { + mutex_lock(&mvm->mutex); + if (iwl_mvm_request_statistics(mvm, true)) + handle_uapsd = false; + mutex_unlock(&mvm->mutex); + } + + spin_lock(&mvm->tcm.lock); + /* re-check if somebody else won the recheck race */ + if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) { + /* calculate statistics */ + unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts, + handle_uapsd); + + /* the memset needs to be visible before the timestamp */ + smp_mb(); + mvm->tcm.ts = ts; + if (work_delay) + schedule_delayed_work(&mvm->tcm.work, work_delay); + } + spin_unlock(&mvm->tcm.lock); + + iwl_mvm_tcm_results(mvm); +} + +void iwl_mvm_tcm_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, + tcm.work); + + iwl_mvm_recalc_tcm(mvm); +} + +void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) +{ + spin_lock_bh(&mvm->tcm.lock); + mvm->tcm.paused = true; + spin_unlock_bh(&mvm->tcm.lock); + if (with_cancel) + cancel_delayed_work_sync(&mvm->tcm.work); +} + +void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) +{ + int mac; + + spin_lock_bh(&mvm->tcm.lock); + mvm->tcm.ts = jiffies; + mvm->tcm.ll_ts = jiffies; + for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { + struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + + memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts)); + memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); + memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); + memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + } + /* The TCM data needs to be reset before "paused" flag changes */ + smp_mb(); + mvm->tcm.paused = false; + spin_unlock_bh(&mvm->tcm.lock); +} + +void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk, + iwl_mvm_tcm_uapsd_nonagg_detected_wk); +} + +void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk); +} + + void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime) { bool ps_disabled; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 5ef216f3a60b..3fc4343581ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -244,7 +244,7 @@ int 
iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = - TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size); + TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 959de2f8bb28..38234bda9017 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -836,6 +836,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct iwl_trans *iwl_trans; int ret; + if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n")) + return -EINVAL; + iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); if (IS_ERR(iwl_trans)) return PTR_ERR(iwl_trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index ca3b64ff4dad..45ea32796cda 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -383,7 +383,8 @@ struct iwl_self_init_dram { * @hw_init_mask: initial unmasked hw causes * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes - * @tx_cmd_queue_size: the size of the tx command queue + * @in_rescan: true if we have triggered a device rescan + * @scheduled_for_removal: true if we have scheduled a device removal */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -466,6 +467,8 @@ struct iwl_trans_pcie { u32 hw_mask; cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; u16 tx_cmd_queue_size; + bool in_rescan; + bool scheduled_for_removal; }; static inline struct iwl_trans_pcie * @@ -537,7 +540,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); -void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans); static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd, u8 idx) @@ -822,7 +824,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f25ce3a1ea50..d15f5ba2dc77 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
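(Aside on the recurring CSR change: throughout the PCIe hunks below, fixed flag masks such as CSR_GP_CNTRL_REG_FLAG_INIT_DONE are replaced by bit indices looked up from the per-family trans->cfg->csr description and wrapped in BIT(), so a single call site works for hardware families with different register layouts. The stand-alone sketch below illustrates only the pattern; the type, field names, and bit values here are invented for the demo and are not the actual iwlwifi definitions.)

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a per-family CSR description: bit *indices*, not masks,
 * so callers apply BIT()/(1u << ...) at the point of use. */
struct csr_params {
	uint8_t flag_init_done;       /* bit index in the GP control reg */
	uint8_t flag_mac_clock_ready; /* bit index in the GP control reg */
};

struct device_cfg {
	const char *name;
	const struct csr_params *csr;
};

/* Illustrative values only; real layouts differ per hardware family. */
static const struct csr_params family_a_csr = {
	.flag_init_done = 2,
	.flag_mac_clock_ready = 0,
};

static const struct csr_params family_b_csr = {
	.flag_init_done = 6,
	.flag_mac_clock_ready = 5,
};

static uint32_t gp_cntrl; /* fake register for the demo */

static void set_init_done(const struct device_cfg *cfg)
{
	/* One call site serves every family: the bit position comes from
	 * the config instead of a compile-time constant. */
	gp_cntrl |= 1u << cfg->csr->flag_init_done;
}

int main(void)
{
	const struct device_cfg a = { "family-a", &family_a_csr };
	const struct device_cfg b = { "family-b", &family_b_csr };

	set_init_done(&a);
	printf("%s: GP_CNTRL = 0x%08x\n", a.name, (unsigned)gp_cntrl);

	gp_cntrl = 0;
	set_init_done(&b);
	printf("%s: GP_CNTRL = 0x%08x\n", b.name, (unsigned)gp_cntrl);
	return 0;
}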
@@ -201,7 +202,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", reg); iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); rxq->need_update = true; return; } @@ -901,6 +902,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) } def_rxq = trans_pcie->rxq; + cancel_work_sync(&rba->rx_alloc); + spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index cb4012541f45..b8e8dac2895d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,7 +94,8 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to @@ -100,8 +103,9 @@ static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) * and accesses to uCode SRAM. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), + 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to init the card\n"); return ret; @@ -143,7 +147,8 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_clear_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); } void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) @@ -187,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_gen2_apm_stop(trans, false); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 5517ea4c2aa0..7229991ae70d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -35,6 +36,7 @@ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -73,6 +75,7 @@ #include <linux/gfp.h> #include <linux/vmalloc.h> #include <linux/pm_runtime.h> +#include <linux/module.h> #include "iwl-drv.h" #include "iwl-trans.h" @@ -179,7 +182,8 @@ out: static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_sw_reset)); usleep_range(5000, 6000); } @@ -372,7 +376,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to @@ -380,8 +385,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) * and accesses to uCode SRAM. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), + 25000); if (ret < 0) { IWL_ERR(trans, "Failed to init the card\n"); return ret; @@ -459,15 +465,16 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); /* * Wait for clock stabilization; once stabilized, access to * device-internal resources is possible. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (WARN_ON(ret < 0)) { IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); @@ -519,7 +526,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
*/ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); /* Activates XTAL resources monitor */ __iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG, @@ -541,11 +548,12 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans) int ret; /* stop device's busmaster DMA activity */ - iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_stop_master)); - ret = iwl_poll_bit(trans, CSR_RESET, - CSR_RESET_REG_FLAG_MASTER_DISABLED, - CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset, + BIT(trans->cfg->csr->flag_master_dis), + BIT(trans->cfg->csr->flag_master_dis), 100); if (ret < 0) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); @@ -594,7 +602,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); } static int iwl_pcie_nic_init(struct iwl_trans *trans) @@ -1267,7 +1275,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* Stop the device, and put it in low power state */ iwl_pcie_apm_stop(trans, false); @@ -1497,9 +1505,9 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); iwl_pcie_enable_rx_wake(trans, false); @@ -1543,15 +1551,17 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_reset_ict(trans); iwl_enable_interrupts(trans); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_mac_access_req)); + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); @@ -1562,7 +1572,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); } else { iwl_trans_pcie_tx_reset(trans); @@ -1926,6 +1936,29 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) clear_bit(STATUS_TPOWER_PMI, &trans->status); } +struct iwl_trans_pcie_removal { + struct pci_dev *pdev; + struct work_struct work; +}; + +static void iwl_trans_pcie_removal_wk(struct work_struct *wk) +{ + struct iwl_trans_pcie_removal *removal = + container_of(wk, struct iwl_trans_pcie_removal, work); + struct pci_dev *pdev = removal->pdev; + char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + + dev_err(&pdev->dev, "Device gone - attempting 
removal\n"); + kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); + pci_lock_rescan_remove(); + pci_dev_put(pdev); + pci_stop_and_remove_bus_device(pdev); + pci_unlock_rescan_remove(); + + kfree(removal); + module_put(THIS_MODULE); +} + static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, unsigned long *flags) { @@ -1939,7 +1972,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, /* this bit wakes up the NIC */ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) udelay(2); @@ -1964,15 +1997,59 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * and do not save/restore SRAM when power cycling. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + BIT(trans->cfg->csr->flag_val_mac_access_en), + (BIT(trans->cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (unlikely(ret < 0)) { - iwl_trans_pcie_dump_regs(trans); - iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); + u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); + WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", - iwl_read32(trans, CSR_GP_CNTRL)); + cntrl); + + iwl_trans_pcie_dump_regs(trans); + + if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { + struct iwl_trans_pcie_removal *removal; + + if (trans_pcie->scheduled_for_removal) + goto err; + + IWL_ERR(trans, "Device gone - scheduling removal!\n"); + + /* + * get a module reference to avoid doing this + * while unloading anyway and to avoid + * scheduling a work with code that's being + * removed. + */ + if (!try_module_get(THIS_MODULE)) { + IWL_ERR(trans, + "Module is being unloaded - abort\n"); + goto err; + } + + removal = kzalloc(sizeof(*removal), GFP_ATOMIC); + if (!removal) { + module_put(THIS_MODULE); + goto err; + } + /* + * we don't need to clear this flag, because + * the trans will be freed and reallocated. + */ + trans_pcie->scheduled_for_removal = true; + + removal->pdev = to_pci_dev(trans->dev); + INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); + pci_dev_get(removal->pdev); + schedule_work(&removal->work); + } else { + iwl_write32(trans, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + } + +err: spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags); return false; } @@ -2003,7 +2080,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans, goto out; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); /* * Above we read the CSR_GP_CNTRL register, which will flush * any previous writes, but we need the write that clears the @@ -3232,12 +3309,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * id located at the AUX bus MISC address space. 
*/ iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + BIT(trans->cfg->csr->flag_init_done)); udelay(2); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index fabae0f60683..48890a1c825f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -488,6 +488,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (iwl_queue_space(txq) < txq->high_mark) { + iwl_stop_queue(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_queue_space(txq) < 3)) { + struct iwl_device_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans_pcie->dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + spin_unlock(&txq->lock); + return 0; + } + } + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); /* Set up driver data for this TFD */ @@ -523,9 +540,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Tell device the write index *just past* this latest filled TFD */ txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); - if (iwl_queue_space(txq) < txq->high_mark) - iwl_stop_queue(trans, txq); - /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. @@ -555,15 +569,13 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; - int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + int i, cmd_pos, idx; u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); - - memset(tfd, 0, sizeof(*tfd)); + struct iwl_tfh_tfd *tfd; copy_size = sizeof(struct iwl_cmd_header_wide); cmd_size = sizeof(struct iwl_cmd_header_wide); @@ -634,6 +646,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, spin_lock_bh(&txq->lock); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); + memset(tfd, 0, sizeof(*tfd)); + if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); @@ -957,6 +973,13 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); } } + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ @@ -972,7 +995,7 @@ static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans, /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, + trans_pcie->tfd_size * txq->n_window, txq->tfds, txq->dma_addr); dma_free_coherent(dev, sizeof(*txq->first_tb_bufs) * txq->n_window, @@ -1020,7 +1043,7 @@ static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id) int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue_cfg_cmd *cmd, - int cmd_id, + int cmd_id, int size, unsigned int timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1046,12 +1069,12 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, return -ENOMEM; } - ret = iwl_pcie_txq_alloc(trans, txq, TFD_TX_CMD_SLOTS, false); + ret = iwl_pcie_txq_alloc(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue alloc failed\n"); goto error; } - ret = iwl_pcie_txq_init(trans, txq, TFD_TX_CMD_SLOTS, false); + ret = iwl_pcie_txq_init(trans, txq, size, false); if (ret) { IWL_ERR(trans, "Tx queue init failed\n"); goto error; @@ -1061,7 +1084,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS)); + cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) @@ -1152,8 +1175,6 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) struct iwl_txq *cmd_queue; int txq_id = trans_pcie->cmd_queue, ret; - iwl_pcie_set_tx_cmd_queue_size(trans); - /* alloc and init the command queue */ if (!trans_pcie->txq[txq_id]) { cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL); @@ -1162,8 +1183,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) return -ENOMEM; } trans_pcie->txq[txq_id] = cmd_queue; - ret = iwl_pcie_txq_alloc(trans, cmd_queue, - trans_pcie->tx_cmd_queue_size, true); + ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; @@ -1172,8 +1192,7 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans) cmd_queue = trans_pcie->txq[txq_id]; } - ret = iwl_pcie_txq_init(trans, cmd_queue, - trans_pcie->tx_cmd_queue_size, true); + ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 1a566287993d..473fe7ccb07c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
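
The gen2 TX hunk above moves flow control to the front of
iwl_trans_pcie_gen2_tx(): the queue is stopped as soon as free space
falls below high_mark, and when fewer than 3 slots remain the frame is
parked on overflow_q instead of being placed on the ring (overflow_q is
later drained, or freed on teardown as iwl_pcie_gen2_txq_unmap() now
does). A minimal userspace model of that pattern follows; the names and
sizes here are illustrative, not driver API:

#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 8
#define HIGH_MARK  5   /* stop the queue when free space falls below this */

struct txq {
	int used;       /* ring slots currently occupied */
	int overflow;   /* frames parked off-ring */
	bool stopped;   /* mirrors the iwl_stop_queue() state */
};

static int queue_space(const struct txq *q)
{
	return RING_SLOTS - q->used;
}

static void tx_one(struct txq *q)
{
	if (queue_space(q) < HIGH_MARK) {
		q->stopped = true;          /* stop feeding the queue */
		if (queue_space(q) < 3) {   /* no room: park the frame */
			q->overflow++;
			return;
		}
	}
	q->used++;                          /* place the descriptor on the ring */
}

int main(void)
{
	struct txq q = {0};

	for (int i = 0; i < 10; i++)
		tx_one(&q);
	printf("ring=%d overflow=%d stopped=%d\n",
	       q.used, q.overflow, q.stopped);
	return 0;
}

Running the model shows both branches: the queue stops once space drops
under HIGH_MARK but still rings frames until fewer than 3 slots remain,
after which frames accumulate in overflow.
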
@@ -273,7 +274,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); txq->need_update = true; return; } @@ -495,6 +496,9 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; + if (trans->cfg->use_tfh) + tfd_sz = trans_pcie->tfd_size * slots_num; + timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); txq->trans_pcie = trans_pcie; @@ -608,7 +612,7 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(trans->cfg->csr->flag_mac_access_req)); } /* @@ -950,8 +954,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : - TFD_TX_CMD_SLOTS; + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); @@ -970,21 +973,6 @@ error: return ret; } -void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int queue_size = TFD_CMD_SLOTS; - - if (trans->cfg->tx_cmd_queue_size) - queue_size = trans->cfg->tx_cmd_queue_size; - - if (WARN_ON(!(is_power_of_2(queue_size) && - TFD_QUEUE_CB_SIZE(queue_size) > 0))) - trans_pcie->tx_cmd_queue_size = TFD_CMD_SLOTS; - else - trans_pcie->tx_cmd_queue_size = queue_size; -} - int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -992,8 +980,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) int txq_id, slots_num; bool alloc = false; - iwl_pcie_set_tx_cmd_queue_size(trans); - if (!trans_pcie->txq_memory) { ret = iwl_pcie_tx_alloc(trans); if (ret) @@ -1017,8 +1003,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? trans_pcie->tx_cmd_queue_size : - TFD_TX_CMD_SLOTS; + slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { @@ -1166,7 +1151,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, * In that case, iwl_queue_space will be small again * and we won't wake mac80211's queue. */ - iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id); + iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } spin_lock_bh(&txq->lock); @@ -1187,6 +1172,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const struct iwl_cfg *cfg = trans->cfg; int ret; lockdep_assert_held(&trans_pcie->reg_lock); @@ -1204,19 +1190,19 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. 
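 *
 * The wake request taken here is then held for the whole lifetime of
 * the command: cmd_hold_nic_awake guards against requesting it twice,
 * and the same flag_mac_access_req bit is only cleared again in
 * iwl_pcie_clear_cmd_in_flight() above.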
*/ - if (trans->cfg->base_params->apmg_wake_up_wa && + if (cfg->base_params->apmg_wake_up_wa && !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(cfg->csr->flag_mac_access_req)); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, - (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + BIT(cfg->csr->flag_val_mac_access_en), + (BIT(cfg->csr->flag_mac_clock_ready) | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + BIT(cfg->csr->flag_mac_access_req)); IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } @@ -2411,7 +2397,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); - if (amsdu) { + /* + * If gso_size wasn't set, don't give the frame "amsdu treatment" + * (adding subframes, etc.). + * This can happen in some testing flows when the amsdu was already + * pre-built, and we just need to send the resulting skb. + */ + if (amsdu && skb_shinfo(skb)->gso_size) { if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, out_meta, dev_cmd, tb1_len))) diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c index 9b0ded733294..b277113b33d3 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.c +++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c @@ -57,7 +57,7 @@ islpci_eth_cleanup_transmit(islpci_private *priv, #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "cleanup skb %p skb->data %p skb->len %u truesize %u\n ", + "cleanup skb %p skb->data %p skb->len %u truesize %u\n", skb, skb->data, skb->len, skb->truesize); #endif @@ -328,7 +328,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n ", + "frq->addr %x skb->data %p skb->len %u offset %u truesize %u\n", control_block->rx_data_low[priv->free_data_rx].address, skb->data, skb->len, offset, skb->truesize); #endif @@ -436,7 +436,7 @@ islpci_eth_receive(islpci_private *priv) #if VERBOSE > SHOW_ERROR_MESSAGES DEBUG(SHOW_TRACING, - "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n ", + "new alloc skb %p skb->data %p skb->len %u index %u truesize %u\n", skb, skb->data, skb->len, index, skb->truesize); #endif diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 920c23e542a5..9825bfd42abc 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2514,7 +2514,6 @@ static void hwsim_mcast_new_radio(int id, struct genl_info *info, return; out_err: - genlmsg_cancel(mcast_skb, data); nlmsg_free(mcast_skb); } @@ -2650,6 +2649,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, SIGNAL_DBM); + ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, TDLS_WIDER_BW); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 7f7e9de2db1c..a67e2d66ac9d 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -929,7 +929,7 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, 
adapter->rx_locked = false; spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); - mwifiex_set_mac_address(priv, dev); + mwifiex_set_mac_address(priv, dev, false, NULL); return 0; } @@ -1158,6 +1158,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as IBSS\n", dev->name); + /* fall through */ case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */ return 0; default: @@ -1188,6 +1189,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as STA\n", dev->name); + /* fall through */ case NL80211_IFTYPE_STATION: /* This shouldn't happen */ return 0; default: @@ -1210,6 +1212,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as AP\n", dev->name); + /* fall through */ case NL80211_IFTYPE_AP: /* This shouldn't happen */ return 0; default: @@ -1249,6 +1252,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, case NL80211_IFTYPE_UNSPECIFIED: mwifiex_dbg(priv->adapter, INFO, "%s: kept type as P2P\n", dev->name); + /* fall through */ case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: return 0; @@ -1979,7 +1983,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, bss_cfg->bcast_ssid_ctl = 0; break; case NL80211_HIDDEN_SSID_ZERO_CONTENTS: - /* firmware doesn't support this type of hidden SSID */ + bss_cfg->bcast_ssid_ctl = 2; + break; default: kfree(bss_cfg); return -EINVAL; @@ -2257,7 +2262,7 @@ done: if (!bss) { if (is_scanning_required) { - mwifiex_dbg(priv->adapter, WARN, + mwifiex_dbg(priv->adapter, MSG, "assoc: requested bss not found in scan results\n"); break; } @@ -2978,7 +2983,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, priv->netdev = dev; if (!adapter->mfg_mode) { - mwifiex_set_mac_address(priv, dev); + mwifiex_set_mac_address(priv, dev, false, NULL); ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL, true); @@ -3554,6 +3559,9 @@ static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + return -EOPNOTSUPP; + return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, HostCmd_ACT_GEN_SET, 0, data, true); } @@ -4189,6 +4197,16 @@ static const struct wiphy_wowlan_support mwifiex_wowlan_support = { .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, }; + +static const struct wiphy_wowlan_support mwifiex_wowlan_support_no_gtk = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, + .n_patterns = MWIFIEX_MEF_MAX_FILTERS, + .pattern_min_len = 1, + .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, + .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, + .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, +}; #endif static bool mwifiex_is_valid_alpha2(const char *alpha2) @@ -4307,7 +4325,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) WIPHY_FLAG_TDLS_EXTERNAL_SETUP; #ifdef CONFIG_PM - wiphy->wowlan = &mwifiex_wowlan_support; + if (ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + wiphy->wowlan = &mwifiex_wowlan_support; + else + wiphy->wowlan = &mwifiex_wowlan_support_no_gtk; #endif wiphy->coalesce = &mwifiex_coalesce_support; diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c 
b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 7014f440e6f8..9cfcdf6bec52 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -25,7 +25,6 @@ #include "main.h" #include "wmm.h" #include "11n.h" -#include "11ac.h" static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index db2872daae97..07453932f703 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -154,34 +154,6 @@ free_and_exit: } /* - * Proc device dump read handler. - * - * This function is called when the 'device_dump' file is opened for - * reading. - * This function dumps driver information and firmware memory segments - * (ex. DTCM, ITCM, SQRAM etc.) for - * debugging. - */ -static ssize_t -mwifiex_device_dump_read(struct file *file, char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct mwifiex_private *priv = file->private_data; - - /* For command timeouts, USB firmware will automatically emit - * firmware dump events, so we don't implement device_dump(). - * For user-initiated dumps, we trigger it ourselves. - */ - if (priv->adapter->iface_type == MWIFIEX_USB) - mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT, - HostCmd_ACT_GEN_SET, 0, NULL, true); - else - priv->adapter->if_ops.device_dump(priv->adapter); - - return 0; -} - -/* * Proc getlog file read handler. * * This function is called when the 'getlog' file is opened for reading @@ -980,7 +952,6 @@ static const struct file_operations mwifiex_dfs_##name##_fops = { \ MWIFIEX_DFS_FILE_READ_OPS(info); MWIFIEX_DFS_FILE_READ_OPS(debug); MWIFIEX_DFS_FILE_READ_OPS(getlog); -MWIFIEX_DFS_FILE_READ_OPS(device_dump); MWIFIEX_DFS_FILE_OPS(regrdwr); MWIFIEX_DFS_FILE_OPS(rdeeprom); MWIFIEX_DFS_FILE_OPS(memrw); @@ -1011,7 +982,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(getlog); MWIFIEX_DFS_ADD_FILE(regrdwr); MWIFIEX_DFS_ADD_FILE(rdeeprom); - MWIFIEX_DFS_ADD_FILE(device_dump); + MWIFIEX_DFS_ADD_FILE(memrw); MWIFIEX_DFS_ADD_FILE(hscfg); MWIFIEX_DFS_ADD_FILE(histogram); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index c5dc518f768b..b73f99dc5a72 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -249,6 +249,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16)) #define ISSUPP_ADHOC_ENABLED(FwCapInfo) (FwCapInfo & BIT(25)) #define ISSUPP_RANDOM_MAC(FwCapInfo) (FwCapInfo & BIT(27)) +#define ISSUPP_FIRMWARE_SUPPLICANT(FwCapInfo) (FwCapInfo & BIT(21)) #define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 922e3d69fd84..b10baacb51c9 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -349,6 +349,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, case WLAN_EID_SUPP_RATES: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: + case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index b6484582845a..510f6b8e717d 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -858,7 +858,7 @@ mwifiex_clone_skb_for_tx_status(struct mwifiex_private *priv, /* * CFG802.11 network device handler for data transmission. */ -static int +static netdev_tx_t mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); @@ -940,28 +940,32 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } int mwifiex_set_mac_address(struct mwifiex_private *priv, - struct net_device *dev) + struct net_device *dev, bool external, + u8 *new_mac) { int ret; u64 mac_addr, old_mac_addr; - if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) - return -ENOTSUPP; + old_mac_addr = ether_addr_to_u64(priv->curr_addr); - mac_addr = ether_addr_to_u64(priv->curr_addr); - old_mac_addr = mac_addr; + if (external) { + mac_addr = ether_addr_to_u64(new_mac); + } else { + /* Internal mac address change */ + if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) + return -ENOTSUPP; - if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) - mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + mac_addr = old_mac_addr; - if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { - /* Set mac address based on bss_type/bss_num */ - mac_addr ^= BIT_ULL(priv->bss_type + 8); - mac_addr += priv->bss_num; - } + if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P) + mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); - if (mac_addr == old_mac_addr) - goto done; + if (mwifiex_get_intf_num(priv->adapter, priv->bss_type) > 1) { + /* Set mac address based on bss_type/bss_num */ + mac_addr ^= BIT_ULL(priv->bss_type + 8); + mac_addr += priv->bss_num; + } + } u64_to_ether_addr(mac_addr, priv->curr_addr); @@ -976,7 +980,6 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, return ret; } -done: ether_addr_copy(dev->dev_addr, priv->curr_addr); return 0; } @@ -989,8 +992,7 @@ mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); struct sockaddr *hw_addr = addr; - memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); - return mwifiex_set_mac_address(priv, dev); + return mwifiex_set_mac_address(priv, dev, true, hw_addr->sa_data); } /* @@ -1331,7 +1333,10 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->assocresp_idx = MWIFIEX_AUTO_IDX_MASK; priv->gen_idx = MWIFIEX_AUTO_IDX_MASK; priv->num_tx_timeout = 0; - ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); + if (is_valid_ether_addr(dev->dev_addr)) + ether_addr_copy(priv->curr_addr, dev->dev_addr); + else + ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 9bde181700dc..69ac0a22c28c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -84,8 +84,8 @@ enum { #define MWIFIEX_TIMER_10S 10000 #define MWIFIEX_TIMER_1S 1000 -#define MAX_TX_PENDING 100 -#define LOW_TX_PENDING 80 +#define MAX_TX_PENDING 400 +#define LOW_TX_PENDING 380 #define HIGH_RX_PENDING 50 #define LOW_RX_PENDING 20 @@ -1691,6 +1691,7 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); +void 
mwifiex_fw_dump_event(struct mwifiex_private *priv); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, int cmd_type, @@ -1709,7 +1710,8 @@ void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); int mwifiex_set_mac_address(struct mwifiex_private *priv, - struct net_device *dev); + struct net_device *dev, + bool external, u8 *new_mac); void mwifiex_devdump_tmo_func(unsigned long function_context); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 97a6199692ab..0c42b7296ddd 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -320,6 +320,19 @@ static void mwifiex_pcie_shutdown(struct pci_dev *pdev) return; } +static void mwifiex_pcie_coredump(struct device *dev) +{ + struct pci_dev *pdev; + struct pcie_service_card *card; + + pdev = container_of(dev, struct pci_dev, dev); + card = pci_get_drvdata(pdev); + + if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, + &card->work_flags)) + schedule_work(&card->work); +} + static const struct pci_device_id mwifiex_ids[] = { { PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, @@ -415,11 +428,12 @@ static struct pci_driver __refdata mwifiex_pcie = { .id_table = mwifiex_ids, .probe = mwifiex_pcie_probe, .remove = mwifiex_pcie_remove, -#ifdef CONFIG_PM_SLEEP .driver = { + .coredump = mwifiex_pcie_coredump, +#ifdef CONFIG_PM_SLEEP .pm = &mwifiex_pcie_pm_ops, - }, #endif + }, .shutdown = mwifiex_pcie_shutdown, .err_handler = &mwifiex_pcie_err_handler, }; @@ -1881,7 +1895,8 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, EVENT, "info: Event length: %d\n", evt_len); - if ((evt_len > 0) && (evt_len < MAX_EVENT_SIZE)) + if (evt_len > MWIFIEX_EVENT_HEADER_LEN && + evt_len < MAX_EVENT_SIZE) memcpy(adapter->event_body, skb_cmd->data + MWIFIEX_EVENT_HEADER_LEN, evt_len - MWIFIEX_EVENT_HEADER_LEN); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index d7ce7f75ae38..895b806cdb03 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -482,7 +482,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, scan_chan_list[chan_idx].max_scan_time = cpu_to_le16((u16) user_scan_in-> chan_list[0].scan_time); - else if (ch->flags & IEEE80211_CHAN_NO_IR) + else if ((ch->flags & IEEE80211_CHAN_NO_IR) || + (ch->flags & IEEE80211_CHAN_RADAR)) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->passive_scan_time); else @@ -502,10 +503,12 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_DISABLE_CHAN_FILT; - if (filtered_scan) { + if (filtered_scan && + !((ch->flags & IEEE80211_CHAN_NO_IR) || + (ch->flags & IEEE80211_CHAN_RADAR))) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->specific_scan_time); - } + chan_idx++; } @@ -1308,6 +1311,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, case WLAN_EID_CHANNEL_SWITCH: bss_entry->chan_sw_ie_present = true; + /* fall through */ case WLAN_EID_PWR_CAPABILITY: case WLAN_EID_TPC_REPORT: case WLAN_EID_QUIET: diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 
a82880132af4..47d2dcc3f28f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -466,6 +466,17 @@ static int mwifiex_sdio_suspend(struct device *dev) return ret; } +static void mwifiex_sdio_coredump(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct sdio_mmc_card *card; + + card = sdio_get_drvdata(func); + if (!test_and_set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, + &card->work_flags)) + schedule_work(&card->work); +} + /* Device ID for SD8786 */ #define SDIO_DEVICE_ID_MARVELL_8786 (0x9116) /* Device ID for SD8787 */ @@ -515,6 +526,7 @@ static struct sdio_driver mwifiex_sdio = { .remove = mwifiex_sdio_remove, .drv = { .owner = THIS_MODULE, + .coredump = mwifiex_sdio_coredump, .pm = &mwifiex_sdio_pm_ops, } }; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 93dfb76cd8a6..03a6492662ca 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -241,6 +241,9 @@ void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); + if (!ISSUPP_FIRMWARE_SUPPLICANT(priv->adapter->fw_cap_info)) + return; + mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG, HostCmd_ACT_GEN_REMOVE, 0, NULL, false); } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index e8c8728db15a..e86217a6b9ca 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -108,7 +108,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) struct mwifiex_adapter *adapter = priv->adapter; int len, i; u32 eventcause = adapter->event_cause; - struct station_info sinfo; + struct station_info *sinfo; struct mwifiex_assoc_event *event; struct mwifiex_sta_node *node; u8 *deauth_mac; @@ -117,7 +117,10 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) switch (eventcause) { case EVENT_UAP_STA_ASSOC: - memset(&sinfo, 0, sizeof(sinfo)); + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + event = (struct mwifiex_assoc_event *) (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER); if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) { @@ -132,28 +135,31 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) len = ETH_ALEN; if (len != -1) { - sinfo.assoc_req_ies = &event->data[len]; - len = (u8 *)sinfo.assoc_req_ies - + sinfo->assoc_req_ies = &event->data[len]; + len = (u8 *)sinfo->assoc_req_ies - (u8 *)&event->frame_control; - sinfo.assoc_req_ies_len = + sinfo->assoc_req_ies_len = le16_to_cpu(event->len) - (u16)len; } } - cfg80211_new_sta(priv->netdev, event->sta_addr, &sinfo, + cfg80211_new_sta(priv->netdev, event->sta_addr, sinfo, GFP_KERNEL); node = mwifiex_add_sta_entry(priv, event->sta_addr); if (!node) { mwifiex_dbg(adapter, ERROR, "could not create station entry!\n"); + kfree(sinfo); return -1; } - if (!priv->ap_11n_enabled) + if (!priv->ap_11n_enabled) { + kfree(sinfo); break; + } - mwifiex_set_sta_ht_cap(priv, sinfo.assoc_req_ies, - sinfo.assoc_req_ies_len, node); + mwifiex_set_sta_ht_cap(priv, sinfo->assoc_req_ies, + sinfo->assoc_req_ies_len, node); for (i = 0; i < MAX_NUM_TID; i++) { if (node->is_11n_enabled) @@ -163,6 +169,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) node->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED; } memset(node->rx_seq, 
0xff, sizeof(node->rx_seq)); + kfree(sinfo); break; case EVENT_UAP_STA_DEAUTH: deauth_mac = adapter->event_body + diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 4bc244801636..6e3cf9817730 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) MWIFIEX_FUNC_SHUTDOWN); } + if (adapter->workqueue) + flush_workqueue(adapter->workqueue); + mwifiex_usb_free(card); mwifiex_dbg(adapter, FATAL, @@ -653,6 +656,15 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) usb_put_dev(interface_to_usbdev(intf)); } +static void mwifiex_usb_coredump(struct device *dev) +{ + struct usb_interface *intf = to_usb_interface(dev); + struct usb_card_rec *card = usb_get_intfdata(intf); + + mwifiex_fw_dump_event(mwifiex_get_priv(card->adapter, + MWIFIEX_BSS_ROLE_ANY)); +} + static struct usb_driver mwifiex_usb_driver = { .name = "mwifiex_usb", .probe = mwifiex_usb_probe, @@ -661,6 +673,9 @@ static struct usb_driver mwifiex_usb_driver = { .suspend = mwifiex_usb_suspend, .resume = mwifiex_usb_resume, .soft_unbind = 1, + .drvwrap.driver = { + .coredump = mwifiex_usb_coredump, + }, }; static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf, diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 0cd68ffc2c74..6dd212898117 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, s8 nflr) { struct mwifiex_histogram_data *phist_data = priv->hist_data; + s8 nf = -nflr; + s8 rssi = snr - nflr; atomic_inc(&phist_data->num_samples); atomic_inc(&phist_data->rx_rate[rx_rate]); - atomic_inc(&phist_data->snr[snr]); - atomic_inc(&phist_data->noise_flr[128 + nflr]); - atomic_inc(&phist_data->sig_str[nflr - snr]); + atomic_inc(&phist_data->snr[snr + 128]); + atomic_inc(&phist_data->noise_flr[nf + 128]); + atomic_inc(&phist_data->sig_str[rssi + 128]); } /* function to reset histogram data during init/reset */ @@ -755,3 +757,10 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags) return skb; } EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf); + +void mwifiex_fw_dump_event(struct mwifiex_private *priv) +{ + mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT, HostCmd_ACT_GEN_SET, + 0, NULL, true); +} +EXPORT_SYMBOL_GPL(mwifiex_fw_dump_event); diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c index 89c7d8c7eb48..1e8cdce919d9 100644 --- a/drivers/net/wireless/mediatek/mt76/agg-rx.c +++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c @@ -103,6 +103,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) __skb_queue_head_init(&frames); local_bh_disable(); + rcu_read_lock(); spin_lock(&tid->lock); mt76_rx_aggr_check_release(tid, &frames); @@ -114,6 +115,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work) REORDER_TIMEOUT); mt76_rx_complete(dev, &frames, -1); + rcu_read_unlock(); local_bh_enable(); } @@ -147,12 +149,13 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames) void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct mt76_wcid *wcid = status->wcid; struct ieee80211_sta *sta; 
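	/* Aside on the mwifiex util.c hunk above: the old
	 * sig_str[nflr - snr] index goes negative for any positive SNR
	 * over a negative noise floor (e.g. snr 25, nflr -96 gives
	 * index -121, out of bounds). The fix biases every bucket by
	 * +128: rssi = snr - nflr gives 121, bucket 249; nf = -nflr
	 * gives 96, bucket 224; and snr itself lands in bucket
	 * snr + 128 = 153. Illustrative numbers, assuming nflr carries
	 * the negative dBm noise floor. */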
struct mt76_rx_tid *tid; bool sn_less; u16 seqno, head, size; - u8 idx; + u8 ackp, idx; __skb_queue_tail(frames, skb); @@ -165,10 +168,17 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames) return; } + /* not part of a BA session */ + ackp = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_ACK_POLICY_MASK; + if (ackp != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && + ackp != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) + return; + tid = rcu_dereference(wcid->aggr[status->tid]); if (!tid) return; + status->flag |= RX_FLAG_DUP_VALIDATED; spin_lock_bh(&tid->lock); if (tid->stopped) @@ -257,6 +267,8 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) u8 size = tid->size; int i; + cancel_delayed_work(&tid->reorder_work); + spin_lock_bh(&tid->lock); tid->stopped = true; @@ -271,8 +283,6 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid) } spin_unlock_bh(&tid->lock); - - cancel_delayed_work_sync(&tid->reorder_work); } void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno) diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 3518703524e7..3dbedcedc2c4 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -178,6 +178,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) mt76_dma_sync_idx(dev, q); wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; + + if (!q->queued) + wake_up(&dev->tx_wait); + spin_unlock_bh(&q->lock); if (wake) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 4f30cdcd2b53..fcd079a96782 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -213,7 +213,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband, vht_cap->vht_supported = true; vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_RXSTBC_1 | - IEEE80211_VHT_CAP_SHORT_GI_80; + IEEE80211_VHT_CAP_SHORT_GI_80 | + (3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT); return 0; } @@ -267,6 +268,27 @@ mt76_check_sband(struct mt76_dev *dev, int band) dev->hw->wiphy->bands[band] = NULL; } +struct mt76_dev * +mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) +{ + struct ieee80211_hw *hw; + struct mt76_dev *dev; + + hw = ieee80211_alloc_hw(size, ops); + if (!hw) + return NULL; + + dev = hw->priv; + dev->hw = hw; + spin_lock_init(&dev->rx_lock); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->cc_lock); + init_waitqueue_head(&dev->tx_wait); + + return dev; +} +EXPORT_SYMBOL_GPL(mt76_alloc_device); + int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates) { @@ -276,8 +298,6 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, dev_set_drvdata(dev->dev, dev); - spin_lock_init(&dev->lock); - spin_lock_init(&dev->cc_lock); INIT_LIST_HEAD(&dev->txwi_cache); SET_IEEE80211_DEV(hw, dev->dev); @@ -358,12 +378,32 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(mt76_rx); +static bool mt76_has_tx_pending(struct mt76_dev *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) { + if (dev->q_tx[i].queued) + return true; + } + + return false; +} + void mt76_set_channel(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; struct cfg80211_chan_def *chandef = &hw->conf.chandef; struct mt76_channel_state *state; bool 
offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL; + int timeout = HZ / 5; + + if (offchannel) + set_bit(MT76_OFFCHANNEL, &dev->state); + else + clear_bit(MT76_OFFCHANNEL, &dev->state); + + wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), timeout); if (dev->drv->update_survey) dev->drv->update_survey(dev); @@ -541,15 +581,13 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps) return; - if (ps) { + if (ps) set_bit(MT_WCID_FLAG_PS, &wcid->flags); - mt76_stop_tx_queues(dev, sta, true); - } else { + else clear_bit(MT_WCID_FLAG_PS, &wcid->flags); - } - ieee80211_sta_ps_transition(sta, ps); dev->drv->sta_ps(dev, sta, ps); + ieee80211_sta_ps_transition(sta, ps); } void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, @@ -562,6 +600,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, if (queue >= 0) napi = &dev->napi[queue]; + spin_lock(&dev->rx_lock); while ((skb = __skb_dequeue(frames)) != NULL) { if (mt76_check_ccmp_pn(skb)) { dev_kfree_skb(skb); @@ -571,6 +610,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, sta = mt76_rx_convert(skb); ieee80211_rx_napi(dev->hw, sta, skb, napi); } + spin_unlock(&dev->rx_lock); } void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 065ff78059c3..d2166fbf50ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -189,6 +189,7 @@ enum { MT76_STATE_RUNNING, MT76_SCANNING, MT76_RESET, + MT76_OFFCHANNEL, }; struct mt76_hw_cap { @@ -241,6 +242,7 @@ struct mt76_dev { struct device *dev; struct net_device napi_dev; + spinlock_t rx_lock; struct napi_struct napi[__MT_RXQ_MAX]; struct sk_buff_head rx_skb[__MT_RXQ_MAX]; @@ -249,6 +251,8 @@ struct mt76_dev { struct mt76_queue q_rx[__MT_RXQ_MAX]; const struct mt76_queue_ops *queue_ops; + wait_queue_head_t tx_wait; + u8 macaddr[ETH_ALEN]; u32 rev; unsigned long state; @@ -375,6 +379,8 @@ mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) return &msband->chan[idx]; } +struct mt76_dev *mt76_alloc_device(unsigned int size, + const struct ieee80211_ops *ops); int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates); void mt76_unregister_device(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h index 783b8122ec3c..dc12bbdbb2ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h @@ -39,6 +39,9 @@ #define MT_CALIBRATE_INTERVAL HZ +#define MT_MAX_VIFS 8 +#define MT_VIF_WCID(_n) (254 - ((_n) & 7)) + #include "mt76.h" #include "mt76x2_regs.h" #include "mt76x2_mac.h" @@ -117,7 +120,6 @@ struct mt76x2_dev { u8 beacon_mask; u8 beacon_data_mask; - u32 rev; u32 rxfilter; u16 chainmask; @@ -151,7 +153,7 @@ struct mt76x2_sta { static inline bool is_mt7612(struct mt76x2_dev *dev) { - return (dev->rev >> 16) == 0x7612; + return mt76_chip(&dev->mt76) == 0x7612; } void mt76x2_set_irq_mask(struct mt76x2_dev *dev, u32 clear, u32 set); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h index 47f79d83fcb4..e9d426bbf91a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dma.h @@ -19,7 +19,7 @@ #include "dma.h" -#define 
MT_TXD_INFO_LEN GENMASK(13, 0) +#define MT_TXD_INFO_LEN GENMASK(15, 0) #define MT_TXD_INFO_NEXT_VLD BIT(16) #define MT_TXD_INFO_TX_BURST BIT(17) #define MT_TXD_INFO_80211 BIT(19) @@ -27,9 +27,8 @@ #define MT_TXD_INFO_CSO BIT(21) #define MT_TXD_INFO_WIV BIT(24) #define MT_TXD_INFO_QSEL GENMASK(26, 25) -#define MT_TXD_INFO_TCO BIT(29) -#define MT_TXD_INFO_UCO BIT(30) -#define MT_TXD_INFO_ICO BIT(31) +#define MT_TXD_INFO_DPORT GENMASK(29, 27) +#define MT_TXD_INFO_TYPE GENMASK(31, 30) #define MT_RX_FCE_INFO_LEN GENMASK(13, 0) #define MT_RX_FCE_INFO_SELF_GEN BIT(15) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c index 5bb50027c1e8..95d5f7d888f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.c @@ -609,17 +609,13 @@ int mt76x2_get_temp_comp(struct mt76x2_dev *dev, struct mt76x2_temp_comp *t) memset(t, 0, sizeof(*t)); - val = mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1); - if (!(val & MT_EE_NIC_CONF_1_TEMP_TX_ALC)) + if (!mt76x2_temp_tx_alc_enabled(dev)) return -EINVAL; if (!mt76x2_ext_pa_enabled(dev, band)) return -EINVAL; val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8; - if (!(val & BIT(7))) - return -EINVAL; - t->temp_25_ref = val & 0x7f; if (band == NL80211_BAND_5GHZ) { slope = mt76x2_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h index d79122728dca..aa0b0c040375 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_eeprom.h @@ -159,6 +159,12 @@ void mt76x2_read_rx_gain(struct mt76x2_dev *dev); static inline bool mt76x2_temp_tx_alc_enabled(struct mt76x2_dev *dev) { + u16 val; + + val = mt76x2_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G); + if (!(val & BIT(15))) + return false; + return mt76x2_eeprom_get(dev, MT_EE_NIC_CONF_1) & MT_EE_NIC_CONF_1_TEMP_TX_ALC; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c index 934c331d995e..79ab93613e06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c @@ -228,7 +228,7 @@ mt76x2_init_beacon_offsets(struct mt76x2_dev *dev) mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); } -int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) +static int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) { static const u8 null_addr[ETH_ALEN] = {}; const u8 *macaddr = dev->mt76.macaddr; @@ -296,6 +296,9 @@ int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard) for (i = 0; i < 256; i++) mt76x2_mac_wcid_setup(dev, i, 0, NULL); + for (i = 0; i < MT_MAX_VIFS; i++) + mt76x2_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); + for (i = 0; i < 16; i++) for (k = 0; k < 4; k++) mt76x2_mac_shared_key_setup(dev, i, k, NULL); @@ -370,12 +373,12 @@ void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force) /* Wait for MAC to become idle */ for (i = 0; i < 300; i++) { - if (mt76_rr(dev, MT_MAC_STATUS) & - (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) - continue; - - if (mt76_rr(dev, MT_BBP(IBI, 12))) + if ((mt76_rr(dev, MT_MAC_STATUS) & + (MT_MAC_STATUS_RX | MT_MAC_STATUS_TX)) || + mt76_rr(dev, MT_BBP(IBI, 12))) { + udelay(1); continue; + } stopped = true; break; @@ -482,7 +485,10 @@ void mt76x2_set_tx_ackto(struct mt76x2_dev *dev) { u8 ackto, sifs, slottime = dev->slottime; + /* As defined by IEEE 802.11-2007 17.3.8.6 */ slottime += 3 * dev->coverage_class; + 
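	/* Each coverage-class step adds 3 us of air-propagation
	 * allowance to the slot time (IEEE 802.11-2007 17.3.8.6), so
	 * e.g. class 2 turns a 9 us short slot into 15 us before it is
	 * programmed below. */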
mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG, MT_XIFS_TIME_CFG_OFDM_SIFS); @@ -632,17 +638,16 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev) .rx_poll_complete = mt76x2_rx_poll_complete, .sta_ps = mt76x2_sta_ps, }; - struct ieee80211_hw *hw; struct mt76x2_dev *dev; + struct mt76_dev *mdev; - hw = ieee80211_alloc_hw(sizeof(*dev), &mt76x2_ops); - if (!hw) + mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops); + if (!mdev) return NULL; - dev = hw->priv; - dev->mt76.dev = pdev; - dev->mt76.hw = hw; - dev->mt76.drv = &drv_ops; + dev = container_of(mdev, struct mt76x2_dev, mt76); + mdev->dev = pdev; + mdev->drv = &drv_ops; mutex_init(&dev->mutex); spin_lock_init(&dev->irq_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c index d18315652583..b49aea4da2d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c @@ -301,6 +301,9 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb, u8 wcid; int len; + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return -EINVAL; + if (rxinfo & MT_RXINFO_L2PAD) pad_len += 2; @@ -410,7 +413,6 @@ mt76x2_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, break; default: return -EINVAL; - break; } if (rate & MT_RXWI_RATE_SGI) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h index 8a8a25e32d5f..c048cd06df6b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.h @@ -157,7 +157,6 @@ mt76x2_skb_tx_info(struct sk_buff *skb) return (void *) info->status.status_driver_data; } -int mt76x2_mac_reset(struct mt76x2_dev *dev, bool hard); int mt76x2_mac_start(struct mt76x2_dev *dev); void mt76x2_mac_stop(struct mt76x2_dev *dev, bool force); void mt76x2_mac_resume(struct mt76x2_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c index 73c127f92613..ce90ff999b49 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c @@ -104,7 +104,7 @@ mt76x2_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) idx += 8; mvif->idx = idx; - mvif->group_wcid.idx = 254 - idx; + mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; mt76x2_txq_init(dev, vif->txq); @@ -124,11 +124,14 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) { int ret; + cancel_delayed_work_sync(&dev->cal_work); + + set_bit(MT76_RESET, &dev->mt76.state); + mt76_set_channel(&dev->mt76); tasklet_disable(&dev->pre_tbtt_tasklet); tasklet_disable(&dev->dfs_pd.dfs_tasklet); - cancel_delayed_work_sync(&dev->cal_work); mt76x2_mac_stop(dev, true); ret = mt76x2_phy_set_channel(dev, chandef); @@ -143,6 +146,10 @@ mt76x2_set_channel(struct mt76x2_dev *dev, struct cfg80211_chan_def *chandef) tasklet_enable(&dev->dfs_pd.dfs_tasklet); tasklet_enable(&dev->pre_tbtt_tasklet); + clear_bit(MT76_RESET, &dev->mt76.state); + + mt76_txq_schedule_all(&dev->mt76); + return ret; } @@ -247,8 +254,7 @@ mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int slottime = info->use_short_slot ? 
9 : 20; dev->slottime = slottime; - mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, - MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + mt76x2_set_tx_ackto(dev); } mutex_unlock(&dev->mutex); @@ -321,6 +327,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76); int idx = msta->wcid.idx; + mt76_stop_tx_queues(&dev->mt76, sta, true); mt76x2_mac_wcid_set_drop(dev, idx, ps); } @@ -452,7 +459,6 @@ mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) clear_bit(MT76_SCANNING, &dev->mt76.state); tasklet_enable(&dev->pre_tbtt_tasklet); - mt76_txq_schedule_all(&dev->mt76); } static void diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c index fcc37eb7ce0b..c1c38ca3330a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2_phy.c @@ -73,16 +73,6 @@ int mt76x2_phy_get_rssi(struct mt76x2_dev *dev, s8 rssi, int chain) return rssi; } -static u8 -mt76x2_txpower_check(int value) -{ - if (value < 0) - return 0; - if (value > 0x2f) - return 0x2f; - return value; -} - static void mt76x2_add_rate_power_offset(struct mt76_rate_power *r, int offset) { @@ -102,6 +92,26 @@ mt76x2_limit_rate_power(struct mt76_rate_power *r, int limit) r->all[i] = limit; } +static int +mt76x2_get_min_rate_power(struct mt76_rate_power *r) +{ + int i; + s8 ret = 0; + + for (i = 0; i < sizeof(r->all); i++) { + if (!r->all[i]) + continue; + + if (ret) + ret = min(ret, r->all[i]); + else + ret = r->all[i]; + } + + return ret; +} + + void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) { enum nl80211_chan_width width = dev->mt76.chandef.width; @@ -109,6 +119,7 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) struct mt76x2_tx_power_info txp; int txp_0, txp_1, delta = 0; struct mt76_rate_power t = {}; + int base_power, gain; mt76x2_get_power_info(dev, &txp, chan); @@ -117,26 +128,32 @@ void mt76x2_phy_set_txpower(struct mt76x2_dev *dev) else if (width == NL80211_CHAN_WIDTH_80) delta = txp.delta_bw80; - if (txp.target_power > dev->txpower_conf) - delta -= txp.target_power - dev->txpower_conf; - mt76x2_get_rate_power(dev, &t, chan); - mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power + - txp.chain[0].delta); + mt76x2_add_rate_power_offset(&t, txp.chain[0].target_power); mt76x2_limit_rate_power(&t, dev->txpower_conf); dev->txpower_cur = mt76x2_get_max_rate_power(&t); - mt76x2_add_rate_power_offset(&t, -(txp.chain[0].target_power + - txp.chain[0].delta + delta)); - dev->target_power = txp.chain[0].target_power; - dev->target_power_delta[0] = txp.chain[0].delta + delta; - dev->target_power_delta[1] = txp.chain[1].delta + delta; - dev->rate_power = t; - txp_0 = mt76x2_txpower_check(txp.chain[0].target_power + - txp.chain[0].delta + delta); + base_power = mt76x2_get_min_rate_power(&t); + delta += base_power - txp.chain[0].target_power; + txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta; + txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta; + + gain = min(txp_0, txp_1); + if (gain < 0) { + base_power -= gain; + txp_0 -= gain; + txp_1 -= gain; + } else if (gain > 0x2f) { + base_power -= gain - 0x2f; + txp_0 = 0x2f; + txp_1 = 0x2f; + } - txp_1 = mt76x2_txpower_check(txp.chain[1].target_power + - txp.chain[1].delta + delta); + mt76x2_add_rate_power_offset(&t, -base_power); + dev->target_power = txp.chain[0].target_power; + dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power; + 
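	/* The gain clamp above keeps the per-chain init values inside
	 * the hardware's [0, 0x2f] range by shifting the remainder into
	 * base_power (i.e. into the rate table): e.g. txp_0 = -4,
	 * txp_1 = -2 becomes base_power += 4, txp_0 = 0, txp_1 = 2; an
	 * excess over 0x2f moves the other way. */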
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power; + dev->rate_power = t; mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0); mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1); @@ -180,7 +197,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x2_dev *dev) if (mt76x2_channel_silent(dev)) return false; - if (chan->band == NL80211_BAND_2GHZ) + if (chan->band == NL80211_BAND_5GHZ) flag |= BIT(0); if (mt76x2_ext_pa_enabled(dev, chan->band)) @@ -257,7 +274,6 @@ mt76x2_phy_set_txpower_regs(struct mt76x2_dev *dev, enum nl80211_band band) mt76_wr(dev, MT_TX_ALC_CFG_2, 0x1b0f0400); mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476); } - mt76_wr(dev, MT_TX_ALC_CFG_4, 0); if (mt76x2_ext_pa_enabled(dev, band)) pa_mode_adj = 0x04000000; @@ -492,8 +508,10 @@ mt76x2_phy_update_channel_gain(struct mt76x2_dev *dev) u8 gain_delta; int low_gain; - dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + (rssi0 << 8); - dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + (rssi1 << 8); + dev->cal.avg_rssi[0] = (dev->cal.avg_rssi[0] * 15) / 16 + + (rssi0 << 8) / 16; + dev->cal.avg_rssi[1] = (dev->cal.avg_rssi[1] * 15) / 16 + + (rssi1 << 8) / 16; dev->cal.avg_rssi_all = (dev->cal.avg_rssi[0] + dev->cal.avg_rssi[1]) / 512; @@ -661,6 +679,14 @@ int mt76x2_phy_set_channel(struct mt76x2_dev *dev, memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init, sizeof(dev->cal.agc_gain_cur)); + /* init default values for temp compensation */ + if (mt76x2_tssi_enabled(dev)) { + mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, + 0x38); + mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, + 0x38); + } + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 4eef69bd8a9e..e96956710fb2 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -332,7 +332,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, if (probe) break; - if (test_bit(MT76_SCANNING, &dev->state) || + if (test_bit(MT76_OFFCHANNEL, &dev->state) || test_bit(MT76_RESET, &dev->state)) return -EBUSY; @@ -385,6 +385,10 @@ restart: bool empty = false; int cur; + if (test_bit(MT76_OFFCHANNEL, &dev->state) || + test_bit(MT76_RESET, &dev->state)) + return -EBUSY; + mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list); if (mtxq->send_bar && mtxq->aggr) { struct ieee80211_txq *txq = mtxq_to_txq(mtxq); @@ -422,12 +426,14 @@ void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq) { int len; + rcu_read_lock(); do { if (hwq->swq_queued >= 4 || list_empty(&hwq->swq)) break; len = mt76_txq_schedule_list(dev, hwq); } while (len > 0); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(mt76_txq_schedule); diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c index d55d7040a56d..148c36d3d2e5 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mac.c +++ b/drivers/net/wireless/mediatek/mt7601u/mac.c @@ -453,7 +453,7 @@ mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, { dev->bcn_freq_off = rxwi->freq_off; dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + ewma_rssi_add(&dev->avg_rssi, -rssi); } static int @@ -503,7 +503,7 @@ u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, if (mt7601u_rx_is_our_beacon(dev, data)) mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi); 
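	/* Both branches now feed ewma_rssi_add() with -rssi: the
	 * <linux/average.h> helpers accumulate unsigned values, so the
	 * negative dBm reading is stored negated and flipped back on
	 * read (see mt7601u_agc_tune() below). */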
else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) - dev->avg_rssi = (dev->avg_rssi * 15) / 16 + (rssi << 8); + ewma_rssi_add(&dev->avg_rssi, -rssi); spin_unlock_bh(&dev->con_mon_lock); return len; diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index 3c9ea40d9584..7b21016012c3 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -288,6 +288,12 @@ mt7601u_sw_scan_complete(struct ieee80211_hw *hw, mt7601u_agc_restore(dev); clear_bit(MT7601U_STATE_SCANNING, &dev->state); + + ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, + MT_CALIBRATE_INTERVAL); + if (dev->freq_cal.enabled) + ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, + MT_FREQ_CAL_INIT_DELAY); } static int diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h index 9233744451a9..db317d8c1652 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h +++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h @@ -23,6 +23,7 @@ #include <linux/completion.h> #include <net/mac80211.h> #include <linux/debugfs.h> +#include <linux/average.h> #include "regs.h" @@ -138,6 +139,8 @@ enum { MT7601U_STATE_MORE_STATS, }; +DECLARE_EWMA(rssi, 10, 4); + /** * struct mt7601u_dev - adapter structure * @lock: protects @wcid->tx_rate. @@ -220,7 +223,7 @@ struct mt7601u_dev { s8 bcn_freq_off; u8 bcn_phy_mode; - int avg_rssi; /* starts at 0 and converges */ + struct ewma_rssi avg_rssi; u8 agc_save; diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c index ca09a5d4305e..9d2f9a776ef1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/phy.c +++ b/drivers/net/wireless/mediatek/mt7601u/phy.c @@ -795,6 +795,7 @@ mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate) switch (phy_mode) { case MT_PHY_TYPE_OFDM: tx_rate += 4; + /* fall through */ case MT_PHY_TYPE_CCK: reg = dev->rf_pa_mode[0]; break; @@ -974,6 +975,7 @@ void mt7601u_agc_restore(struct mt7601u_dev *dev) static void mt7601u_agc_tune(struct mt7601u_dev *dev) { u8 val = mt7601u_agc_default(dev); + long avg_rssi; if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) return; @@ -983,9 +985,12 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev) * Rssi updates are only on beacons and U2M so should work... 
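	 * ewma_rssi_read() returns 0 only when no sample has been added
	 * yet (the average is reinitialized whenever the monitored
	 * BSSID changes), which is what the WARN_ON_ONCE below catches;
	 * negating the value restores the dBm sign that
	 * ewma_rssi_add() flipped.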
*/ spin_lock_bh(&dev->con_mon_lock); - if (dev->avg_rssi <= -70) + avg_rssi = ewma_rssi_read(&dev->avg_rssi); + WARN_ON_ONCE(avg_rssi == 0); + avg_rssi = -avg_rssi; + if (avg_rssi <= -70) val -= 0x20; - else if (dev->avg_rssi <= -60) + else if (avg_rssi <= -60) val -= 0x10; spin_unlock_bh(&dev->con_mon_lock); @@ -1101,7 +1106,7 @@ void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, /* Start/stop collecting beacon data */ spin_lock_bh(&dev->con_mon_lock); ether_addr_copy(dev->ap_bssid, info->bssid); - dev->avg_rssi = 0; + ewma_rssi_init(&dev->avg_rssi); dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; spin_unlock_bh(&dev->con_mon_lock); diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 0a1604683bab..323e47cea1e2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -27,7 +27,8 @@ enum qtnf_fw_state { QTNF_FW_STATE_FW_DNLD_DONE, QTNF_FW_STATE_BOOT_DONE, QTNF_FW_STATE_ACTIVE, - QTNF_FW_STATE_DEAD, + QTNF_FW_STATE_DETACHED, + QTNF_FW_STATE_EP_DEAD, }; struct qtnf_bus; diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 0398bece5782..220e2b710208 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -177,8 +177,6 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) vif->netdev->ieee80211_ptr = NULL; vif->netdev = NULL; vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - eth_zero_addr(vif->mac_addr); - eth_zero_addr(vif->bssid); return 0; } @@ -216,10 +214,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, } eth_zero_addr(vif->mac_addr); + eth_zero_addr(vif->bssid); vif->bss_priority = QTNF_DEF_BSS_PRIORITY; + vif->sta_state = QTNF_STA_DISCONNECTED; + memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.wiphy = wiphy; vif->wdev.iftype = type; - vif->sta_state = QTNF_STA_DISCONNECTED; break; default: pr_err("MAC%u: unsupported IF type %d\n", mac->macid, type); @@ -255,8 +255,6 @@ err_mac: qtnf_cmd_send_del_intf(vif); err_cmd: vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - eth_zero_addr(vif->mac_addr); - eth_zero_addr(vif->bssid); return ERR_PTR(-EFAULT); } @@ -651,28 +649,37 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct qtnf_wmac *mac = wiphy_priv(wiphy); struct qtnf_vif *vif; - int ret; + int ret = 0; vif = qtnf_mac_get_base_vif(mac); if (!vif) { pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - return -EFAULT; + ret = -EFAULT; + goto out; } - if (vif->wdev.iftype != NL80211_IFTYPE_STATION) - return -EOPNOTSUPP; + if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { + ret = -EOPNOTSUPP; + goto out; + } + + qtnf_scan_done(mac, true); if (vif->sta_state == QTNF_STA_DISCONNECTED) - return 0; + goto out; ret = qtnf_cmd_send_disconnect(vif, reason_code); if (ret) { pr_err("VIF%u.%u: failed to disconnect\n", mac->macid, vif->vifid); - return ret; + goto out; } - return 0; +out: + if (vif->sta_state == QTNF_STA_CONNECTING) + vif->sta_state = QTNF_STA_DISCONNECTED; + + return ret; } static int @@ -813,6 +820,9 @@ static int qtnf_start_radar_detection(struct wiphy *wiphy, struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); int ret; + if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) + return -ENOTSUPP; + ret = qtnf_cmd_start_cac(vif, chandef, cac_time_ms); if (ret) pr_err("%s: failed to start CAC ret=%d\n", ndev->name, ret); @@ -909,6 +919,9 @@ struct 
wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus) { struct wiphy *wiphy; + if (bus->hw_info.hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) + qtn_cfg80211_ops.start_radar_detection = NULL; + wiphy = wiphy_new(&qtn_cfg80211_ops, sizeof(struct qtnf_wmac)); if (!wiphy) return NULL; @@ -949,6 +962,7 @@ qtnf_wiphy_setup_if_comb(struct wiphy *wiphy, struct qtnf_mac_info *mac_info) int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) { struct wiphy *wiphy = priv_to_wiphy(mac); + struct qtnf_mac_info *macinfo = &mac->macinfo; int ret; if (!wiphy) { @@ -956,20 +970,20 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) return -EFAULT; } - wiphy->frag_threshold = mac->macinfo.frag_thr; - wiphy->rts_threshold = mac->macinfo.rts_thr; - wiphy->retry_short = mac->macinfo.sretry_limit; - wiphy->retry_long = mac->macinfo.lretry_limit; - wiphy->coverage_class = mac->macinfo.coverage_class; + wiphy->frag_threshold = macinfo->frag_thr; + wiphy->rts_threshold = macinfo->rts_thr; + wiphy->retry_short = macinfo->sretry_limit; + wiphy->retry_long = macinfo->lretry_limit; + wiphy->coverage_class = macinfo->coverage_class; wiphy->max_scan_ssids = QTNF_MAX_SSID_LIST_LENGTH; wiphy->max_scan_ie_len = QTNF_MAX_VSIE_LEN; wiphy->mgmt_stypes = qtnf_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; - wiphy->max_acl_mac_addrs = mac->macinfo.max_acl_mac_addrs; + wiphy->max_acl_mac_addrs = macinfo->max_acl_mac_addrs; wiphy->max_num_csa_counters = 2; - ret = qtnf_wiphy_setup_if_comb(wiphy, &mac->macinfo); + ret = qtnf_wiphy_setup_if_comb(wiphy, macinfo); if (ret) goto out; @@ -982,15 +996,18 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_HAS_CHANNEL_SWITCH; + if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD); + wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; - wiphy->available_antennas_tx = mac->macinfo.num_tx_chain; - wiphy->available_antennas_rx = mac->macinfo.num_rx_chain; + wiphy->available_antennas_tx = macinfo->num_tx_chain; + wiphy->available_antennas_rx = macinfo->num_rx_chain; - wiphy->max_ap_assoc_sta = mac->macinfo.max_ap_assoc_sta; - wiphy->ht_capa_mod_mask = &mac->macinfo.ht_cap_mod_mask; - wiphy->vht_capa_mod_mask = &mac->macinfo.vht_cap_mod_mask; + wiphy->max_ap_assoc_sta = macinfo->max_ap_assoc_sta; + wiphy->ht_capa_mod_mask = &macinfo->ht_cap_mod_mask; + wiphy->vht_capa_mod_mask = &macinfo->vht_cap_mod_mask; ether_addr_copy(wiphy->perm_addr, mac->macaddr); diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index deca0060eb27..5eb143667539 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -55,6 +55,28 @@ static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp, return 0; } +static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) +{ + switch (qcode) { + case QLINK_CMD_RESULT_OK: + return 0; + case QLINK_CMD_RESULT_INVALID: + return -EINVAL; + case QLINK_CMD_RESULT_ENOTSUPP: + return -ENOTSUPP; + case QLINK_CMD_RESULT_ENOTFOUND: + return -ENOENT; + case QLINK_CMD_RESULT_EALREADY: + return -EALREADY; + case QLINK_CMD_RESULT_EADDRINUSE: + return -EADDRINUSE; + case QLINK_CMD_RESULT_EADDRNOTAVAIL: + return -EADDRNOTAVAIL; + default: + return -EFAULT; + } +} + static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, 
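/*
 * Aside (illustrative): qtnf_cmd_resp_result_decode() above gives the
 * command layer one place that maps firmware qlink status codes onto
 * kernel errnos, so a caller can propagate the result instead of
 * flattening every failure into -EFAULT. Hypothetical caller shape:
 *
 *	ret = qtnf_cmd_send_with_reply(...);	// arguments elided
 *	if (ret)
 *		return ret;		// transport-level failure
 *
 *	ret = qtnf_cmd_resp_result_decode(res_code);
 *	if (ret)
 *		return ret;		// firmware refusal: -EINVAL, -EALREADY, ...
 *
 * The default arm still returns -EFAULT, so an unknown firmware code
 * cannot leak through as success.
 */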
struct sk_buff *cmd_skb, struct sk_buff **response_skb, @@ -80,6 +102,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", mac_id, vif_id, le16_to_cpu(cmd->cmd_id), bus->fw_state); + dev_kfree_skb(cmd_skb); return -ENODEV; } @@ -810,10 +833,10 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, if (unlikely(ret)) goto out; - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + ret = qtnf_cmd_resp_result_decode(res_code); + if (ret) { pr_err("VIF%u.%u: CMD %d failed: %u\n", vif->mac->macid, vif->vifid, cmd_type, res_code); - ret = -EFAULT; goto out; } @@ -2316,13 +2339,11 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, else eth_zero_addr(cmd->prev_bssid); - if ((sme->bg_scan_period > 0) && - (sme->bg_scan_period <= QTNF_MAX_BG_SCAN_PERIOD)) + if ((sme->bg_scan_period >= 0) && + (sme->bg_scan_period <= SHRT_MAX)) cmd->bg_scan_period = cpu_to_le16(sme->bg_scan_period); - else if (sme->bg_scan_period == -1) - cmd->bg_scan_period = cpu_to_le16(QTNF_DEFAULT_BG_SCAN_PERIOD); else - cmd->bg_scan_period = 0; /* disabled */ + cmd->bg_scan_period = cpu_to_le16(-1); /* use default value */ if (sme->flags & ASSOC_REQ_DISABLE_HT) connect_flags |= QLINK_STA_CONNECT_DISABLE_HT; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 69a7d56f7e58..cf9274add26d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -58,11 +58,6 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, struct station_parameters *params); int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, struct station_del_parameters *params); - -int qtnf_cmd_resp_parse(struct qtnf_bus *bus, struct sk_buff *resp_skb); -int qtnf_cmd_resp_check(const struct qtnf_vif *vif, - const struct sk_buff *resp_skb, u16 cmd_id, - u16 *result, const u8 **payload, size_t *payload_size); int qtnf_cmd_send_scan(struct qtnf_wmac *mac); int qtnf_cmd_send_connect(struct qtnf_vif *vif, struct cfg80211_connect_params *sme); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index cf26c15a84f8..a6a450984f9a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -76,7 +76,7 @@ static int qtnf_netdev_close(struct net_device *ndev) /* Netdev handler for data transmission. 
*/ -static int +static netdev_tx_t qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct qtnf_vif *vif; @@ -394,7 +394,6 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name, name_assign_type, ether_setup, 1, 1); if (!dev) { - memset(&vif->wdev, 0, sizeof(vif->wdev)); vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; return -ENOMEM; } @@ -629,7 +628,7 @@ void qtnf_core_detach(struct qtnf_bus *bus) if (bus->fw_state == QTNF_FW_STATE_ACTIVE) qtnf_cmd_send_deinit_fw(bus); - bus->fw_state = QTNF_FW_STATE_DEAD; + bus->fw_state = QTNF_FW_STATE_DETACHED; if (bus->workqueue) { flush_workqueue(bus->workqueue); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 3b884c80b6ab..214435448335 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -44,8 +44,6 @@ #define QTNF_MAX_VSIE_LEN 255 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 -#define QTNF_DEFAULT_BG_SCAN_PERIOD 300 -#define QTNF_MAX_BG_SCAN_PERIOD 0xffff #define QTNF_SCAN_TIMEOUT_SEC 15 #define QTNF_DEF_BSS_PRIORITY 0 diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index bcd415f96412..68da81bec4e9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -34,12 +34,13 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, { const u8 *sta_addr; u16 frame_control; - struct station_info sinfo = { 0 }; + struct station_info *sinfo; size_t payload_len; u16 tlv_type; u16 tlv_value_len; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; + int ret = 0; if (unlikely(len < sizeof(*sta_assoc))) { pr_err("VIF%u.%u: payload is too short (%u < %zu)\n", @@ -53,6 +54,10 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, return -EPROTO; } + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + sta_addr = sta_assoc->sta_addr; frame_control = le16_to_cpu(sta_assoc->frame_control); @@ -61,9 +66,9 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, qtnf_sta_list_add(vif, sta_addr); - sinfo.assoc_req_ies = NULL; - sinfo.assoc_req_ies_len = 0; - sinfo.generation = vif->generation; + sinfo->assoc_req_ies = NULL; + sinfo->assoc_req_ies_len = 0; + sinfo->generation = vif->generation; payload_len = len - sizeof(*sta_assoc); tlv = (const struct qlink_tlv_hdr *)sta_assoc->ies; @@ -73,23 +78,27 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, tlv_value_len = le16_to_cpu(tlv->len); tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); - if (tlv_full_len > payload_len) - return -EINVAL; + if (tlv_full_len > payload_len) { + ret = -EINVAL; + goto out; + } if (tlv_type == QTN_TLV_ID_IE_SET) { const struct qlink_tlv_ie_set *ie_set; unsigned int ie_len; - if (payload_len < sizeof(*ie_set)) - return -EINVAL; + if (payload_len < sizeof(*ie_set)) { + ret = -EINVAL; + goto out; + } ie_set = (const struct qlink_tlv_ie_set *)tlv; ie_len = tlv_value_len - (sizeof(*ie_set) - sizeof(ie_set->hdr)); if (ie_set->type == QLINK_IE_SET_ASSOC_REQ && ie_len) { - sinfo.assoc_req_ies = ie_set->ie_data; - sinfo.assoc_req_ies_len = ie_len; + sinfo->assoc_req_ies = ie_set->ie_data; + sinfo->assoc_req_ies_len = ie_len; } } @@ -97,13 +106,17 @@ qtnf_event_handle_sta_assoc(struct qtnf_wmac *mac, struct qtnf_vif *vif, tlv = 
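/*
 * Aside (illustrative): the advance above is the standard bounds-checked
 * TLV walk -- verify the whole TLV fits in the remaining payload before
 * reading it, then step past header plus value. Generic shape, with
 * hypothetical names:
 *
 *	while (payload_len >= sizeof(*tlv)) {
 *		u16 vlen = le16_to_cpu(tlv->len);
 *
 *		if (payload_len < sizeof(*tlv) + vlen)
 *			return -EINVAL;		// truncated TLV
 *		consume(le16_to_cpu(tlv->type), tlv->val, vlen);
 *		payload_len -= sizeof(*tlv) + vlen;
 *		tlv = (const struct qlink_tlv_hdr *)(tlv->val + vlen);
 *	}
 *
 * The same hunk also moves struct station_info off the stack (kzalloc
 * plus a single out: cleanup label); the structure has grown large
 * enough that an on-stack copy risks frame-size warnings.
 */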
(struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } - if (payload_len) - return -EINVAL; + if (payload_len) { + ret = -EINVAL; + goto out; + } - cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, &sinfo, + cfg80211_new_sta(vif->netdev, sta_assoc->sta_addr, sinfo, GFP_KERNEL); - return 0; +out: + kfree(sinfo); + return ret; } static int @@ -198,11 +211,9 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, return -EPROTO; } - if (vif->sta_state != QTNF_STA_CONNECTED) { - pr_err("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", - vif->mac->macid, vif->vifid); - return -EPROTO; - } + if (vif->sta_state != QTNF_STA_CONNECTED) + pr_warn("VIF%u.%u: BSS_LEAVE event when STA is not connected\n", + vif->mac->macid, vif->vifid); pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); @@ -443,6 +454,17 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif, cfg80211_cac_event(vif->netdev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); break; + case QLINK_RADAR_CAC_STARTED: + if (vif->wdev.cac_started) + break; + + if (!wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_DFS_OFFLOAD)) + break; + + cfg80211_cac_event(vif->netdev, &chandef, + NL80211_RADAR_CAC_STARTED, GFP_KERNEL); + break; default: pr_warn("%s: unhandled radar event %u\n", vif->netdev->name, ev->event); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index f117904d9120..3120d49df565 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -751,8 +751,16 @@ tx_done: static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) { struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); + int ret; + + ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); + + if (ret == -ETIMEDOUT) { + pr_err("EP firmware is dead\n"); + bus->fw_state = QTNF_FW_STATE_EP_DEAD; + } - return qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); + return ret; } static irqreturn_t qtnf_interrupt(int irq, void *data) @@ -1185,6 +1193,10 @@ static void qtnf_fw_work_handler(struct work_struct *work) if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY, QTN_FW_DL_TIMEOUT_MS)) { pr_err("card is not ready\n"); + + if (!flashboot) + release_firmware(fw); + goto fw_load_fail; } @@ -1234,7 +1246,7 @@ static void qtnf_fw_work_handler(struct work_struct *work) goto fw_load_exit; fw_load_fail: - bus->fw_state = QTNF_FW_STATE_DEAD; + bus->fw_state = QTNF_FW_STATE_DETACHED; fw_load_exit: complete(&bus->firmware_init_complete); @@ -1404,7 +1416,8 @@ static void qtnf_pcie_remove(struct pci_dev *pdev) wait_for_completion(&bus->firmware_init_complete); - if (bus->fw_state == QTNF_FW_STATE_ACTIVE) + if (bus->fw_state == QTNF_FW_STATE_ACTIVE || + bus->fw_state == QTNF_FW_STATE_EP_DEAD) qtnf_core_detach(bus); priv = get_bus_priv(bus); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 9bf3ae4d1b3b..f85deda703fb 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -68,10 +68,12 @@ struct qlink_msg_header { * @QLINK_HW_CAPAB_STA_INACT_TIMEOUT: device implements a logic to kick-out * associated STAs due to inactivity. Inactivity timeout period is taken * from QLINK_CMD_START_AP parameters. 
+ * @QLINK_HW_CAPAB_DFS_OFFLOAD: device implements DFS offload functionality */ enum qlink_hw_capab { - QLINK_HW_CAPAB_REG_UPDATE = BIT(0), - QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), + QLINK_HW_CAPAB_REG_UPDATE = BIT(0), + QLINK_HW_CAPAB_STA_INACT_TIMEOUT = BIT(1), + QLINK_HW_CAPAB_DFS_OFFLOAD = BIT(2), }; enum qlink_iface_type { @@ -672,6 +674,8 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_ENOTSUPP, QLINK_CMD_RESULT_ENOTFOUND, QLINK_CMD_RESULT_EALREADY, + QLINK_CMD_RESULT_EADDRINUSE, + QLINK_CMD_RESULT_EADDRNOTAVAIL, }; /** @@ -1031,6 +1035,7 @@ enum qlink_radar_event { QLINK_RADAR_CAC_ABORTED, QLINK_RADAR_NOP_FINISHED, QLINK_RADAR_PRE_CAC_EXPIRED, + QLINK_RADAR_CAC_STARTED, }; /** diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c index ccddfebc508a..345f34ec9750 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.c +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c @@ -35,8 +35,10 @@ int qtnf_trans_send_cmd_with_resp(struct qtnf_bus *bus, struct sk_buff *cmd_skb, bool resp_not_handled = true; struct sk_buff *resp_skb = NULL; - if (unlikely(!response_skb)) + if (unlikely(!response_skb)) { + dev_kfree_skb(cmd_skb); return -EFAULT; + } spin_lock(&ctl_node->resp_lock); ctl_node->seq_num++; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h index 6a8c93fb6a43..b05ed2f3025a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h @@ -94,6 +94,7 @@ #define REV_RT3390E 0x0211 #define REV_RT3593E 0x0211 #define REV_RT5390F 0x0502 +#define REV_RT5370G 0x0503 #define REV_RT5390R 0x1502 #define REV_RT5592C 0x0221 @@ -1193,10 +1194,10 @@ #define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0) #define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00) #define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000) -#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000) -#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000) -#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000) -#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000) +#define TX_PWR_CFG_3_UNKNOWN1 FIELD32(0x000f0000) +#define TX_PWR_CFG_3_UNKNOWN2 FIELD32(0x00f00000) +#define TX_PWR_CFG_3_UNKNOWN3 FIELD32(0x0f000000) +#define TX_PWR_CFG_3_UNKNOWN4 FIELD32(0xf0000000) /* bits for 3T devices */ #define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f) #define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0) @@ -1216,10 +1217,10 @@ * TX_PWR_CFG_4: */ #define TX_PWR_CFG_4 0x1324 -#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f) -#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0) -#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00) -#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000) +#define TX_PWR_CFG_4_UNKNOWN5 FIELD32(0x0000000f) +#define TX_PWR_CFG_4_UNKNOWN6 FIELD32(0x000000f0) +#define TX_PWR_CFG_4_UNKNOWN7 FIELD32(0x00000f00) +#define TX_PWR_CFG_4_UNKNOWN8 FIELD32(0x0000f000) /* bits for 3T devices */ #define TX_PWR_CFG_4_STBC4_CH0 FIELD32(0x0000000f) #define TX_PWR_CFG_4_STBC4_CH1 FIELD32(0x000000f0) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 429d07b651dd..a567bc273ffc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1557,12 +1557,13 @@ static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); } -int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, +int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct 
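/*
 * Aside (illustrative): the trans.c and commands.c hunks above share one
 * rule -- once a cmd_skb has been handed to the send path, the send path
 * owns it, so every early exit must free it:
 *
 *	if (unlikely(!response_skb)) {
 *		dev_kfree_skb(cmd_skb);		// we own it; caller will not
 *		return -EFAULT;
 *	}
 *
 * Previously, bailing out on a missing response pointer or in a
 * non-active fw_state leaked the command skb.
 */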
ieee80211_sta *sta) { - int wcid; - struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; + struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); + int wcid; /* * Limit global maximum TX AMPDU length to smallest value of all @@ -1608,8 +1609,10 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(rt2800_sta_add); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta) +int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { + struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); int wcid = sta_priv->wcid; @@ -6220,8 +6223,9 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } - /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + /* These chips have hardware RX antenna diversity */ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || + rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) { rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ @@ -8748,7 +8752,9 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00dev->default_ant.rx = ANTENNA_A; } - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + /* These chips have hardware RX antenna diversity */ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || + rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) { rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */ rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */ } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 275e3969abdd..51d9c2a932cc 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -208,9 +208,10 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key); -int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif, +int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); -int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta); +int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index 1123e2bed803..e1a7ed7e4892 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -600,6 +600,7 @@ void rt2800mmio_kick_queue(struct data_queue *queue) case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: + WARN_ON_ONCE(rt2x00queue_empty(queue)); entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c 
b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 1172eefd1c1a..71b1affc3885 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -311,8 +311,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -377,8 +377,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static const struct rt2x00_ops rt2800pci_ops = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c index 6848ebc83534..a502816214ab 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c @@ -150,8 +150,8 @@ static const struct ieee80211_ops rt2800soc_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -216,8 +216,6 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static const struct rt2x00_ops rt2800soc_ops = { diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index d901a41d36e4..98a7313fea4a 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -797,8 +797,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = { .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, - .sta_add = rt2x00mac_sta_add, - .sta_remove = rt2x00mac_sta_remove, + .sta_add = rt2800_sta_add, + .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, @@ -858,8 +858,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, - .sta_add = rt2800_sta_add, - .sta_remove = rt2800_sta_remove, }; static void rt2800usb_queue_init(struct data_queue *queue) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 1f38c338ca7a..a279a4363bc1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1457,10 +1457,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, #else #define rt2x00mac_set_key NULL #endif /* CONFIG_RT2X00_LIB_CRYPTO */ -int rt2x00mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, 
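/*
 * Aside (illustrative): rt2800_sta_add()/_remove() now carry the
 * mac80211 prototypes themselves (struct ieee80211_hw *hw first, device
 * recovered via hw->priv), so each rt2800 flavour wires them straight
 * into its ops table and both the rt2x00mac_* trampolines and the
 * rt2x00lib_ops hooks disappear:
 *
 *	static const struct ieee80211_ops drv_mac80211_ops = {
 *		...
 *		.sta_add    = rt2800_sta_add,	// was rt2x00mac_sta_add
 *		.sta_remove = rt2800_sta_remove,// was rt2x00mac_sta_remove
 *	};
 *
 * One indirection less per call, and the lib layer no longer needs
 * sta_add/sta_remove fields at all.
 */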
struct ieee80211_vif *vif, const u8 *mac_addr); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index a971bc7a6b63..c380c1f56ba6 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -739,8 +739,7 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return; tx_queue_for_each(rt2x00dev, queue) - if (!rt2x00queue_empty(queue)) - rt2x00queue_flush_queue(queue, drop); + rt2x00queue_flush_queue(queue, drop); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c index 7ddee980048b..710e9641552e 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c @@ -999,6 +999,8 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) (queue->qid == QID_AC_BE) || (queue->qid == QID_AC_BK); + if (rt2x00queue_empty(queue)) + return; /* * If we are not supposed to drop any pending diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 38b2ba1ac6f8..380e86f9e00b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1267,8 +1267,8 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index c4b86a84a721..26b674aca125 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -1175,8 +1175,8 @@ static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 718a73c623a7..505ab1b055ff 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3406,8 +3406,8 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_ecc = result[candidate][7]; dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); dev_dbg(dev, - "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " - "ecc=%x\n ", __func__, reg_e94, reg_e9c, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n", + __func__, reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); path_a_ok = true; path_b_ok = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 762a29cdf7ad..39c817eddd78 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -396,7 +396,6 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, AMPDU_AGGREGATION); - ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); @@ -572,8 +571,9 @@ int rtl_init_core(struct ieee80211_hw *hw) spin_lock_init(&rtlpriv->locks.iqk_lock); /* <5> init list */ INIT_LIST_HEAD(&rtlpriv->entry_list); - INIT_LIST_HEAD(&rtlpriv->c2hcmd_list); INIT_LIST_HEAD(&rtlpriv->scan_list.list); + skb_queue_head_init(&rtlpriv->tx_report.queue); + skb_queue_head_init(&rtlpriv->c2hcmd_queue); rtlmac->link_state = MAC80211_NOLINK; @@ -585,11 +585,14 @@ int rtl_init_core(struct ieee80211_hw *hw) EXPORT_SYMBOL_GPL(rtl_init_core); static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw); +static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw, + bool timeout); void rtl_deinit_core(struct ieee80211_hw *hw) { rtl_c2hcmd_launcher(hw, 0); rtl_free_entries_from_scan_list(hw); + rtl_free_entries_from_ack_queue(hw, false); } EXPORT_SYMBOL_GPL(rtl_deinit_core); @@ -1575,22 +1578,52 @@ end: } EXPORT_SYMBOL_GPL(rtl_is_special_data); +void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + + __skb_queue_tail(&tx_report->queue, skb); +} +EXPORT_SYMBOL_GPL(rtl_tx_ackqueue); + +static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, + bool ack) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_tx_info *info; + + info = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(info); + if (ack) { + RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD, + "tx report: ack\n"); + info->flags |= IEEE80211_TX_STAT_ACK; + } else { + RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD, + "tx report: not ack\n"); + info->flags &= ~IEEE80211_TX_STAT_ACK; + } + ieee80211_tx_status_irqsafe(hw, skb); +} + bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb) { u16 ether_type; const u8 *ether_type_ptr; + __le16 fc = rtl_get_fc(skb); ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true); ether_type = be16_to_cpup((__be16 *)ether_type_ptr); - /* EAPOL */ - if (ether_type == ETH_P_PAE) + if (ether_type == ETH_P_PAE || ieee80211_is_nullfunc(fc)) return true; return false; } -static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw) +static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw, + struct rtlwifi_tx_info *tx_info) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tx_report *tx_report = &rtlpriv->tx_report; @@ -1604,29 +1637,33 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw) tx_report->last_sent_sn = sn; tx_report->last_sent_time = jiffies; - + tx_info->sn = sn; + tx_info->send_time = tx_report->last_sent_time; RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG, "Send TX-Report sn=0x%X\n", sn); return sn; } -void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, - struct ieee80211_hw *hw) +void rtl_set_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, + struct ieee80211_hw *hw, struct rtlwifi_tx_info *tx_info) { if (ptcb_desc->use_spe_rpt) { - u16 sn = rtl_get_tx_report_sn(hw); + u16 sn = rtl_get_tx_report_sn(hw, tx_info); SET_TX_DESC_SPE_RPT(pdesc, 1); SET_TX_DESC_SW_DEFINE(pdesc, sn); } } -EXPORT_SYMBOL_GPL(rtl_get_tx_report); 
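/*
 * Aside (illustrative): the reworked TX-report path is a small
 * sequence-number matcher. On transmit, rtl_set_tx_report() writes the
 * same sn into the hardware descriptor and into the skb's cb, and the
 * skb is parked on tx_report->queue via rtl_tx_ackqueue() instead of
 * being completed optimistically. When the C2H report arrives:
 *
 *	skb_queue_walk(queue, skb) {
 *		tx_info = rtl_tx_skb_cb_info(skb);
 *		if (tx_info->sn == sn) {
 *			skb_unlink(skb, queue);
 *			rtl_tx_status(hw, skb, st == 0);  // real ack status
 *			break;
 *		}
 *	}
 *
 * The watchdog sweep (rtl_free_entries_from_ack_queue(hw, true)) flushes
 * entries older than HZ as not-acked, so a lost firmware report cannot
 * strand an skb on the queue.
 */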
+EXPORT_SYMBOL_GPL(rtl_set_tx_report); void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + struct rtlwifi_tx_info *tx_info; + struct sk_buff_head *queue = &tx_report->queue; + struct sk_buff *skb; u16 sn; u8 st, retry; @@ -1642,6 +1679,14 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len) tx_report->last_recv_sn = sn; + skb_queue_walk(queue, skb) { + tx_info = rtl_tx_skb_cb_info(skb); + if (tx_info->sn == sn) { + skb_unlink(skb, queue); + rtl_tx_status(hw, skb, st == 0); + break; + } + } RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG, "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n", st, sn, retry); @@ -1909,6 +1954,25 @@ static void rtl_free_entries_from_scan_list(struct ieee80211_hw *hw) } } +static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw, + bool chk_timeout) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_tx_report *tx_report = &rtlpriv->tx_report; + struct sk_buff_head *queue = &tx_report->queue; + struct sk_buff *skb, *tmp; + struct rtlwifi_tx_info *tx_info; + + skb_queue_walk_safe(queue, skb, tmp) { + tx_info = rtl_tx_skb_cb_info(skb); + if (chk_timeout && + time_after(tx_info->send_time + HZ, jiffies)) + continue; + skb_unlink(skb, queue); + rtl_tx_status(hw, skb, false); + } +} + void rtl_scan_list_expire(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -2173,6 +2237,9 @@ label_lps_done: /* <6> scan list */ rtl_scan_list_expire(hw); + + /* <7> check ack queue */ + rtl_free_entries_from_ack_queue(hw, true); } void rtl_watch_dog_timer_callback(struct timer_list *t) @@ -2195,81 +2262,122 @@ void rtl_fwevt_wq_callback(void *data) rtlpriv->cfg->ops->c2h_command_handle(hw); } -void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - unsigned long flags; - struct rtl_c2hcmd *c2hcmd; +static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, + struct sk_buff *skb); - c2hcmd = kmalloc(sizeof(*c2hcmd), - in_interrupt() ? GFP_ATOMIC : GFP_KERNEL); +static bool rtl_c2h_fast_cmd(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + u8 cmd_id = GET_C2H_CMD_ID(skb->data); - if (!c2hcmd) - goto label_err; + switch (cmd_id) { + case C2H_BT_MP: + return true; + default: + break; + } - c2hcmd->val = kmalloc(len, - in_interrupt() ? 
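/*
 * Aside (illustrative): the allocation being deleted here is why the old
 * C2H path needed in_interrupt() gymnastics -- two allocations per event
 * just to detach the payload from the skb. The replacement keeps the
 * skb itself:
 *
 *	producer (RX/IRQ path):
 *		__skb_queue_tail(&rtlpriv->c2hcmd_queue, skb);	// c2hcmd_lock held
 *		queue_delayed_work(rtl_wq, &works.c2hcmd_wq, 0);
 *
 *	consumer (workqueue):
 *		skb = __skb_dequeue(&rtlpriv->c2hcmd_queue);	// c2hcmd_lock held
 *		rtl_c2h_content_parsing(hw, skb);
 *		dev_kfree_skb_any(skb);
 *
 * No copies, no GFP decisions, and the one event that cannot tolerate
 * the deferral (C2H_BT_MP) is parsed synchronously via
 * rtl_c2h_fast_cmd().
 */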
GFP_ATOMIC : GFP_KERNEL); + return false; +} - if (!c2hcmd->val) - goto label_err2; +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned long flags; - /* fill data */ - c2hcmd->tag = tag; - c2hcmd->len = len; - memcpy(c2hcmd->val, val, len); + if (rtl_c2h_fast_cmd(hw, skb)) { + rtl_c2h_content_parsing(hw, skb); + return; + } /* enqueue */ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list); + __skb_queue_tail(&rtlpriv->c2hcmd_queue, skb); spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); /* wake up wq */ queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0); - - return; - -label_err2: - kfree(c2hcmd); - -label_err: - RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING, - "C2H cmd enqueue fail.\n"); } EXPORT_SYMBOL(rtl_c2hcmd_enqueue); +static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; + const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; + u8 cmd_id, cmd_seq, cmd_len; + u8 *cmd_buf = NULL; + + cmd_id = GET_C2H_CMD_ID(skb->data); + cmd_seq = GET_C2H_SEQ(skb->data); + cmd_len = skb->len - C2H_DATA_OFFSET; + cmd_buf = GET_C2H_DATA_PTR(skb->data); + + switch (cmd_id) { + case C2H_DBG: + RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n"); + break; + case C2H_TXBF: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_TXBF!!\n"); + break; + case C2H_TX_REPORT: + rtl_tx_report_handler(hw, cmd_buf, cmd_len); + break; + case C2H_RA_RPT: + if (hal_ops->c2h_ra_report_handler) + hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len); + break; + case C2H_BT_INFO: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_BT_INFO!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len); + break; + case C2H_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len); + break; + default: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], Unknown packet!! 
cmd_id(%#X)!\n", cmd_id); + break; + } +} + void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct sk_buff *skb; unsigned long flags; - struct rtl_c2hcmd *c2hcmd; int i; for (i = 0; i < 200; i++) { /* dequeue a task */ spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags); - c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list, - struct rtl_c2hcmd, list); - - if (c2hcmd) - list_del(&c2hcmd->list); + skb = __skb_dequeue(&rtlpriv->c2hcmd_queue); spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags); /* do it */ - if (!c2hcmd) + if (!skb) break; - if (rtlpriv->cfg->ops->c2h_content_parsing && exec) - rtlpriv->cfg->ops->c2h_content_parsing(hw, - c2hcmd->tag, c2hcmd->len, c2hcmd->val); + RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n", + *((u8 *)skb->cb)); + RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG, + "C2H data: ", skb->data, skb->len); - /* free */ - kfree(c2hcmd->val); + if (exec) + rtl_c2h_content_parsing(hw, skb); - kfree(c2hcmd); + /* free */ + dev_kfree_skb_any(skb); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index acc924635818..912f205779c3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -130,9 +130,10 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb); u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, bool is_enc); +void rtl_tx_ackqueue(struct ieee80211_hw *hw, struct sk_buff *skb); bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb); -void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, - struct ieee80211_hw *hw); +void rtl_set_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc, + struct ieee80211_hw *hw, struct rtlwifi_tx_info *info); void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len); bool rtl_check_tx_report_acked(struct ieee80211_hw *hw); @@ -160,7 +161,7 @@ void rtl_watchdog_wq_callback(void *data); void rtl_fwevt_wq_callback(void *data); void rtl_c2hcmd_wq_callback(void *data); void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec); -void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val); +void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb); u8 rtl_mrate_idx_to_arfr_id(struct ieee80211_hw *hw, u8 rate_index, enum wireless_mode wirelessmode); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index 8fce371749d3..f22fec093f1d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -1783,7 +1783,7 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, "); + "[BTCoex], PsTdma type mismatch!!!, "); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "curPsTdma=%d, recordPsTdma=%d\n", coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 73ec31972944..df3facc8e5a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -2766,7 +2766,7 @@ static void 
btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) { bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", + "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n", coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); @@ -2876,25 +2876,10 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist) btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 13); /* sw mechanism */ - if (BTC_WIFI_BW_HT40 == wifi_bw) { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, true, true, - false, false); - } - } else { - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } else { - btc8723b2ant_sw_mechanism(btcoexist, false, true, - false, false); - } - } + if (wifi_bw == BTC_WIFI_BW_HT40) + btc8723b2ant_sw_mechanism(btcoexist, true, true, false, false); + else + btc8723b2ant_sw_mechanism(btcoexist, false, true, false, false); } /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index 202597cf8915..b5d65872db84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1584,11 +1584,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist, /* tdma and coex table */ btc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5); - if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN == - wifi_status) - btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); - else - btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); + btc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1); } static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist, @@ -1991,16 +1987,9 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist) wifi_rssi_state = btc8821a1ant_wifi_rssi_state(btcoexist, 1, 2, 30, 0); - if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) || - (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) { - btc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 0, 1); - } else { - btc8821a1ant_limited_tx(btcoexist, - NORMAL_EXEC, 1, 1, - 0, 1); - } + btc8821a1ant_limited_tx(btcoexist, + NORMAL_EXEC, 1, 1, + 0, 1); } else { btc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC, 0, 0, 0, 0); diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 2202d5e18977..01a9d303603b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -2614,7 +2614,7 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist, bool scan = false, link = false, roam = false; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", + "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n", coex_dm->cur_ps_tdma, 
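/*
 * Aside: the action_hid change above is a pure dead-branch removal --
 * both RSSI arms of each if/else passed identical arguments, so only
 * the bandwidth test survives:
 *
 *	if (wifi_bw == BTC_WIFI_BW_HT40)
 *		btc8723b2ant_sw_mechanism(btcoexist, true,  true, false, false);
 *	else
 *		btc8723b2ant_sw_mechanism(btcoexist, false, true, false, false);
 *
 * Same story in the neighbouring 8821a1ant hunks: no behaviour change,
 * just the collapse of branches that were byte-for-byte identical.
 */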
coex_dm->ps_tdma_du_adj_type); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 57bb8f049e59..ae13bcfb3bf0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -619,12 +619,15 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) rtlpriv->link_info.tidtx_inperiod[tid]++; info = IEEE80211_SKB_CB(skb); - ieee80211_tx_info_clear_status(info); - info->flags |= IEEE80211_TX_STAT_ACK; - /*info->status.rates[0].count = 1; */ - - ieee80211_tx_status_irqsafe(hw, skb); + if (likely(!ieee80211_is_nullfunc(fc))) { + ieee80211_tx_info_clear_status(info); + info->flags |= IEEE80211_TX_STAT_ACK; + /*info->status.rates[0].count = 1; */ + ieee80211_tx_status_irqsafe(hw, skb); + } else { + rtl_tx_ackqueue(hw, skb); + } if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) { RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, @@ -827,9 +830,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) goto new_trx_end; } /* handle command packet here */ - if (rtlpriv->cfg->ops->rx_command_packet && - rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) { - dev_kfree_skb_any(skb); + if (stats.packet_report_type == C2H_PACKET) { + rtl_c2hcmd_enqueue(hw, skb); goto new_trx_end; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index 0532b9852444..45c866d3ca88 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h @@ -137,13 +137,6 @@ enum version_8188e { VERSION_UNKNOWN = 0xFF, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, -}; - enum rtl819x_loopback_e { RTL819X_NO_LOOPBACK = 0, RTL819X_MAC_LOOPBACK = 1, @@ -183,31 +176,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_READ_MACREG = 0, - HAL_FW_C2H_CMD_READ_BBREG = 1, - HAL_FW_C2H_CMD_READ_RFREG = 2, - HAL_FW_C2H_CMD_READ_EEPROM = 3, - HAL_FW_C2H_CMD_READ_EFUSE = 4, - HAL_FW_C2H_CMD_READ_CAM = 5, - HAL_FW_C2H_CMD_GET_BASICRATE = 6, - HAL_FW_C2H_CMD_GET_DATARATE = 7, - HAL_FW_C2H_CMD_SURVEY = 8, - HAL_FW_C2H_CMD_SURVEYDONE = 9, - HAL_FW_C2H_CMD_JOINBSS = 10, - HAL_FW_C2H_CMD_ADDSTA = 11, - HAL_FW_C2H_CMD_DELSTA = 12, - HAL_FW_C2H_CMD_ATIMDONE = 13, - HAL_FW_C2H_CMD_TX_REPORT = 14, - HAL_FW_C2H_CMD_CCX_REPORT = 15, - HAL_FW_C2H_CMD_DTM_REPORT = 16, - HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 82681b96ef93..8c15ffd3568b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -263,8 +263,6 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = { .get_rfreg = rtl88e_phy_query_rf_reg, .set_rfreg = rtl88e_phy_set_rf_reg, .get_btc_status = rtl88e_get_btc_status, - .rx_command_packet = rtl88ee_rx_command_packet, - }; static struct rtl_mod_params rtl88ee_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 9670732b2bc6..4c1f8b08fc10 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -850,10 +850,3 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - return 0; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index f902d6769aa8..127ba977206f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -790,8 +790,4 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); - #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index b90aaf128072..d2005d7e9ad2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h @@ -142,31 +142,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_Read_MACREG = 0, - HAL_FW_C2H_CMD_Read_BBREG = 1, - HAL_FW_C2H_CMD_Read_RFREG = 2, - HAL_FW_C2H_CMD_Read_EEPROM = 3, - HAL_FW_C2H_CMD_Read_EFUSE = 4, - HAL_FW_C2H_CMD_Read_CAM = 5, - HAL_FW_C2H_CMD_Get_BasicRate = 6, - HAL_FW_C2H_CMD_Get_DataRate = 7, - HAL_FW_C2H_CMD_Survey = 8, - HAL_FW_C2H_CMD_SurveyDone = 9, - HAL_FW_C2H_CMD_JoinBss = 10, - HAL_FW_C2H_CMD_AddSTA = 11, - HAL_FW_C2H_CMD_DelSTA = 12, - HAL_FW_C2H_CMD_AtimDone = 13, - HAL_FW_C2H_CMD_TX_Report = 14, - HAL_FW_C2H_CMD_CCX_Report = 15, - HAL_FW_C2H_CMD_DTM_Report = 16, - HAL_FW_C2H_CMD_TX_Rate_Statistics = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h index 60f5728b4e2d..9f7e7bb8610b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h @@ -47,14 +47,6 @@ enum version_8192e { VERSION_UNKNOWN = 0xFF, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index f9563ae301ad..84a0d0eb72e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -876,85 +876,11 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) (u8 *)p2p_ps_offload); } -static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, - u8 *cmd_buf, u8 cmd_len) +void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len) { u8 rate = cmd_buf[0] & 0x3F; bool collision_state = cmd_buf[3] & BIT(0); rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state); } - -void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch (c2h_cmd_id) { - case C2H_8192E_DBG: - RT_TRACE(rtlpriv, COMP_FW, 
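/*
 * Aside (illustrative): this per-chip switch is going away because
 * base.c now owns C2H dispatch (rtl_c2h_content_parsing earlier in this
 * diff). The only genuinely chip-specific event, the RA report, stays
 * behind an optional rtl_hal_ops hook:
 *
 *	in the chip driver's hal_ops:
 *		.c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler,
 *
 *	in the common switch:
 *		case C2H_RA_RPT:
 *			if (hal_ops->c2h_ra_report_handler)
 *				hal_ops->c2h_ra_report_handler(hw, cmd_buf,
 *							       cmd_len);
 *			break;
 *
 * One switch to maintain instead of one per chip, and the private
 * C2H_8192E_* and C2H_8723B_* enums can be retired.
 */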
DBG_TRACE, - "[C2H], C2H_8723BE_DBG!!\n"); - break; - case C2H_8192E_TXBF: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8192E_TXBF!!\n"); - break; - case C2H_8192E_TX_REPORT: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE , - "[C2H], C2H_8723BE_TX_REPORT!\n"); - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_8192E_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_8192E_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_8192E_RA_RPT: - _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - default: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id); - break; - } -} - -void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = len - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_8192E_BT_INFO: - case C2H_8192E_BT_MP: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - default: - rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index b770f722daa6..6a2fbf20d579 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -128,17 +128,6 @@ enum rtl8192e_h2c_cmd { MAX_92E_H2CCMD }; -enum rtl8192e_c2h_evt { - C2H_8192E_DBG = 0, - C2H_8192E_LB = 1, - C2H_8192E_TXBF = 2, - C2H_8192E_TX_REPORT = 3, - C2H_8192E_BT_INFO = 9, - C2H_8192E_BT_MP = 11, - C2H_8192E_RA_RPT = 12, - MAX_8192E_C2HEVENT -}; - #define pagenum_128(_len) \ (u32)(((_len) >> 7) + ((_len) & 0x7F ? 
1 : 0)) @@ -190,7 +179,6 @@ void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); -void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); -void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf); +void rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index fd7928fdbd1a..3f2295fdb02d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2574,11 +2574,11 @@ void rtl92ee_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw, rtlpriv->btcoexist.btc_info.btcoexist = 0; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; - rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X2; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X2; } else { rtlpriv->btcoexist.btc_info.btcoexist = 1; rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8192E; - rtlpriv->btcoexist.btc_info.ant_num = ANT_TOTAL_X1; + rtlpriv->btcoexist.btc_info.ant_num = ANT_X1; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index ef92a789871d..9ea62599ecbb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -250,8 +250,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = { .set_rfreg = rtl92ee_phy_set_rf_reg, .fill_h2c_cmd = rtl92ee_fill_h2c_cmd, .get_btc_status = rtl92ee_get_btc_status, - .rx_command_packet = rtl92ee_rx_command_packet, - .c2h_content_parsing = rtl92ee_c2h_content_parsing, + .c2h_ra_report_handler = rtl92ee_c2h_ra_report_handler, }; static struct rtl_mod_params rtl92ee_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 4f7444331b07..14d6e3fc5767 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -662,6 +662,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -723,8 +724,6 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -827,6 +826,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 
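/*
 * Aside (illustrative): rtl_tx_skb_cb_info() is not defined in this
 * diff; the usual shape of such a helper is to carve driver state out
 * of the skb->cb area behind mac80211's ieee80211_tx_info. Assumed
 * layout, for orientation only:
 *
 *	struct rtlwifi_tx_info {	// assumed: sn + timestamp
 *		u16 sn;
 *		unsigned long send_time;
 *	};
 *
 *	static inline struct rtlwifi_tx_info *
 *	rtl_tx_skb_cb_info(struct sk_buff *skb)
 *	{
 *		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 *
 *		return (struct rtlwifi_tx_info *)info->status.status_driver_data;
 *	}
 *
 * That lets rtl_set_tx_report() stamp sn/send_time on the skb with no
 * extra allocation, and the C2H handler read them back after the frame
 * has left the TX path.
 */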
1 : 0)); @@ -1071,27 +1072,3 @@ bool rtl92ee_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, u16 index) void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { } - -u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl92ee_c2h_packet_handler(hw, skb->data, (u8)skb->len); - result = 1; - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, - "Unknown packet type %d\n", status->packet_report_type); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 48c16fff20c6..45df3e79f490 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -762,7 +762,4 @@ void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h index bcdf2273688e..847544817549 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h @@ -152,31 +152,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_Read_MACREG = 0, - HAL_FW_C2H_CMD_Read_BBREG = 1, - HAL_FW_C2H_CMD_Read_RFREG = 2, - HAL_FW_C2H_CMD_Read_EEPROM = 3, - HAL_FW_C2H_CMD_Read_EFUSE = 4, - HAL_FW_C2H_CMD_Read_CAM = 5, - HAL_FW_C2H_CMD_Get_BasicRate = 6, - HAL_FW_C2H_CMD_Get_DataRate = 7, - HAL_FW_C2H_CMD_Survey = 8, - HAL_FW_C2H_CMD_SurveyDone = 9, - HAL_FW_C2H_CMD_JoinBss = 10, - HAL_FW_C2H_CMD_AddSTA = 11, - HAL_FW_C2H_CMD_DelSTA = 12, - HAL_FW_C2H_CMD_AtimDone = 13, - HAL_FW_C2H_CMD_TX_Report = 14, - HAL_FW_C2H_CMD_CCX_Report = 15, - HAL_FW_C2H_CMD_DTM_Report = 16, - HAL_FW_C2H_CMD_TX_Rate_Statistics = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index ec9bcf32f0ab..788de88ab1ee 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -1749,13 +1749,13 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw) switch (c2h_event.cmd_id) { - case C2H_BT_RSSI: + case C2H_V0_BT_RSSI: break; - case C2H_BT_OP_MODE: + case C2H_V0_BT_OP_MODE: break; - case BT_INFO: + case C2H_V0_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h index 3723d7476717..756868897d8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h @@ -130,17 +130,17 @@ enum bt_state { BT_INFO_STATE_MAX = 7 }; -enum rtl8723e_c2h_evt { - C2H_DBG = 0, - C2H_TSF = 1, - C2H_AP_RPT_RSP = 2, +enum rtl8723e_c2h_evt_v0 { + C2H_V0_DBG = 
0, + C2H_V0_TSF = 1, + C2H_V0_AP_RPT_RSP = 2, /* The FW notify the report of the specific tx packet. */ - C2H_CCX_TX_RPT = 3, - C2H_BT_RSSI = 4, - C2H_BT_OP_MODE = 5, - C2H_HW_INFO_EXCH = 10, - C2H_C2H_H2C_TEST = 11, - BT_INFO = 12, + C2H_V0_CCX_TX_RPT = 3, + C2H_V0_BT_RSSI = 4, + C2H_V0_BT_OP_MODE = 5, + C2H_V0_HW_INFO_EXCH = 10, + C2H_V0_C2H_H2C_TEST = 11, + C2H_V0_BT_INFO = 12, MAX_C2HEVENT }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index a545ea317323..07b82700d1de 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -260,7 +260,6 @@ static struct rtl_hal_ops rtl8723e_hal_ops = { .bt_coex_off_before_lps = rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps, .get_btc_status = rtl8723e_get_btc_status, - .rx_command_packet = rtl8723e_rx_command_packet, .is_fw_header = is_fw_header, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 23485602a9a1..d461d0c9631f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -709,10 +709,3 @@ void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - return 0; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 985ce0b77ea5..d592b08d4ac8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -716,7 +716,4 @@ void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h index 025ea5c0f3f6..5e5403d69220 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h @@ -38,14 +38,6 @@ /* Currently only for RTL8723B */ #define EXT_VENDOR_ID (BIT(18) | BIT(19)) -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index 4b963fd27d64..f2441fbb92f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -703,72 +703,3 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, rtl8723be_fill_h2c_cmd(hw, H2C_8723B_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } - -void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch (c2h_cmd_id) { - case C2H_8723B_DBG: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_DBG!!\n"); - break; - case C2H_8723B_TX_REPORT: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_TX_REPORT!\n"); - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case 
C2H_8723B_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_8723B_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8723BE_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - default: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], Unknown packet!! CmdId(%#X)!\n", c2h_cmd_id); - break; - } -} - -void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = len - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_8723B_BT_INFO: - case C2H_8723B_BT_MP: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - - default: - rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 2de4edb62bca..948d28646364 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -100,16 +100,6 @@ enum rtl8723b_h2c_cmd { MAX_8723B_H2CCMD }; -enum rtl8723b_c2h_evt { - C2H_8723B_DBG = 0, - C2H_8723B_LB = 1, - C2H_8723B_TXBF = 2, - C2H_8723B_TX_REPORT = 3, - C2H_8723B_BT_INFO = 9, - C2H_8723B_BT_MP = 11, - MAX_8723B_C2HEVENT -}; - #define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 
1 : 0)) @@ -153,7 +143,4 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus); void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); -void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len); -void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, - u8 c2h_cmd_len, u8 *tmp_buf); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 6a42988aad65..c9f7b042d9c6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -261,9 +261,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = { .set_rfreg = rtl8723be_phy_set_rf_reg, .fill_h2c_cmd = rtl8723be_fill_h2c_cmd, .get_btc_status = rtl8723be_get_btc_status, - .rx_command_packet = rtl8723be_rx_command_packet, .is_fw_header = is_fw_header, - .c2h_content_parsing = rtl8723be_c2h_content_parsing, }; static struct rtl_mod_params rtl8723be_mod_params = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index fd9b38aa08a1..9f8dfb5af774 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -431,6 +431,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -488,8 +489,6 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); /* ptcb_desc->use_driver_rate = true; */ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -578,6 +577,8 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 
1 : 0)); @@ -760,28 +761,3 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl8723be_c2h_packet_handler(hw, skb->data, - (u8)skb->len); - result = 1; - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, - "No this packet type!!\n"); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 988bf0586674..609f7ad7f787 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -632,7 +632,4 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index dfbdf539de1a..3fe3aaa5fe3c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h @@ -296,31 +296,6 @@ enum interface_select_pci { INTF_SEL3_RSV = 3, }; -enum hal_fw_c2h_cmd_id { - HAL_FW_C2H_CMD_READ_MACREG = 0, - HAL_FW_C2H_CMD_READ_BBREG = 1, - HAL_FW_C2H_CMD_READ_RFREG = 2, - HAL_FW_C2H_CMD_READ_EEPROM = 3, - HAL_FW_C2H_CMD_READ_EFUSE = 4, - HAL_FW_C2H_CMD_READ_CAM = 5, - HAL_FW_C2H_CMD_GET_BASICRATE = 6, - HAL_FW_C2H_CMD_GET_DATARATE = 7, - HAL_FW_C2H_CMD_SURVEY = 8, - HAL_FW_C2H_CMD_SURVEYDONE = 9, - HAL_FW_C2H_CMD_JOINBSS = 10, - HAL_FW_C2H_CMD_ADDSTA = 11, - HAL_FW_C2H_CMD_DELSTA = 12, - HAL_FW_C2H_CMD_ATIMDONE = 13, - HAL_FW_C2H_CMD_TX_REPORT = 14, - HAL_FW_C2H_CMD_CCX_REPORT = 15, - HAL_FW_C2H_CMD_DTM_REPORT = 16, - HAL_FW_C2H_CMD_TX_RATE_STATISTICS = 17, - HAL_FW_C2H_CMD_C2HLBK = 18, - HAL_FW_C2H_CMD_C2HDBG = 19, - HAL_FW_C2H_CMD_C2HFEEDBACK = 20, - HAL_FW_C2H_CMD_MAX -}; - enum rtl_desc_qsel { QSLT_BK = 0x2, QSLT_BE = 0x0, @@ -332,14 +307,6 @@ enum rtl_desc_qsel { QSLT_CMD = 0x13, }; -enum rx_packet_type { - NORMAL_RX, - TX_REPORT1, - TX_REPORT2, - HIS_REPORT, - C2H_PACKET, -}; - struct phy_sts_cck_8821ae_t { u8 adc_pwdb_X[4]; u8 sq_rpt; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 9111ba7ff0a1..3be8c88971e2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -2642,7 +2642,7 @@ static void rtl8821ae_dm_edca_choose_traffic_idx( if (cur_tx_bytes > (cur_rx_bytes*4)) { *pb_is_cur_rdl_state = false; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, - "Uplink Traffic\n "); + "Uplink Traffic\n"); } else { *pb_is_cur_rdl_state = true; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index f2b2c549e5b2..d868a034659f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1907,7 +1907,7 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state) 
H2C_8821AE_P2P_PS_OFFLOAD, 1, (u8 *)p2p_ps_offload); } -static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, +void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, u8 *cmd_buf, u8 cmd_len) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1917,70 +1917,3 @@ static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, rtl8821ae_dm_update_init_rate(hw, rate); } - -void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, u8 c2h_cmd_len, - u8 *tmp_buf) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - - switch (c2h_cmd_id) { - case C2H_8812_DBG: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_DBG!!\n"); - break; - case C2H_8812_TX_REPORT: - rtl_tx_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_8812_RA_RPT: - rtl8821ae_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); - break; - case C2H_8812_BT_INFO: - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H], C2H_8812_BT_INFO!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - case C2H_8812_BT_MP: - RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, - "[C2H], C2H_8812_BT_MP!!\n"); - if (rtlpriv->cfg->ops->get_btc_status()) - btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); - break; - default: - break; - } -} - -void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, - u8 length) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 c2h_cmd_id = 0, c2h_cmd_seq = 0, c2h_cmd_len = 0; - u8 *tmp_buf = NULL; - - c2h_cmd_id = buffer[0]; - c2h_cmd_seq = buffer[1]; - c2h_cmd_len = length - 2; - tmp_buf = buffer + 2; - - RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H packet], c2hCmdId=0x%x, c2hCmdSeq=0x%x, c2hCmdLen=%d\n", - c2h_cmd_id, c2h_cmd_seq, c2h_cmd_len); - - RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, - "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len); - - switch (c2h_cmd_id) { - case C2H_8812_BT_INFO: - rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf); - break; - - default: - rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, - tmp_buf); - break; - } -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 32d46d7128f5..99c902ff0b84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -137,20 +137,6 @@ #define FW_PWR_STATE_ACTIVE ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE)) #define FW_PWR_STATE_RF_OFF 0 -enum rtl8812_c2h_evt { - C2H_8812_DBG = 0, - C2H_8812_LB = 1, - C2H_8812_TXBF = 2, - C2H_8812_TX_REPORT = 3, - C2H_8812_BT_INFO = 9, - C2H_8812_BT_MP = 11, - C2H_8812_RA_RPT = 12, - - C2H_8812_FW_SWCHNL = 0x10, - C2H_8812_IQK_FINISH = 0x11, - MAX_8812_C2HEVENT -}; - enum rtl8821a_h2c_cmd { H2C_8821AE_RSVDPAGE = 0, H2C_8821AE_MSRRPT = 1, @@ -331,9 +317,6 @@ void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw, bool func_en); void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw, bool enabled); void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw); -void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, - u8 *buffer, u8 length); -void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, - u8 c2h_cmd_id, u8 c2h_cmd_len, - u8 *tmp_buf); +void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c 
b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 9bb3d9dfce79..77f6401021c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -303,8 +303,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = { .set_rfreg = rtl8821ae_phy_set_rf_reg, .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd, .get_btc_status = rtl8821ae_get_btc_status, - .rx_command_packet = rtl8821ae_rx_command_packet, - .c2h_content_parsing = rtl8821ae_c2h_content_parsing, + .c2h_ra_report_handler = rtl8821ae_c2h_ra_report_handler, .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern, }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 1e1bacf562f3..d7960dd5bf1a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -691,6 +691,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtlwifi_tx_info *tx_info = rtl_tx_skb_cb_info(skb); u8 *pdesc = (u8 *)pdesc_tx; u16 seq_number; __le16 fc = hdr->frame_control; @@ -740,8 +741,6 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); } - /* tx report */ - rtl_get_tx_report(ptcb_desc, pdesc, hw); /* ptcb_desc->use_driver_rate = true; */ SET_TX_DESC_TX_RATE(pdesc, ptcb_desc->hw_rate); @@ -818,6 +817,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_HTC(pdesc, 1); } } + /* tx report */ + rtl_set_tx_report(ptcb_desc, pdesc, hw, tx_info); } SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); @@ -1004,29 +1005,3 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) BIT(0) << (hw_queue)); } } - -u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb) -{ - u32 result = 0; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - switch (status->packet_report_type) { - case NORMAL_RX: - result = 0; - break; - case C2H_PACKET: - rtl8821ae_c2h_packet_handler(hw, skb->data, (u8)skb->len); - result = 1; - RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, - "skb->len=%d\n\n", skb->len); - break; - default: - RT_TRACE(rtlpriv, COMP_RECV, DBG_LOUD, - "No this packet type!!\n"); - break; - } - - return result; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 221dd2b29d3b..4ff0968dba81 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -628,7 +628,4 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue); void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); -u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - const struct rtl_stats *status, - struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index ce1754054a07..0f3b98c5227f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -158,6 +158,30 @@ enum { H2C_BT_PORT_ID = 0x71, }; +enum rtl_c2h_evt_v1 { + C2H_DBG = 0, + C2H_LB = 1, + C2H_TXBF = 2, + C2H_TX_REPORT = 3, + C2H_BT_INFO = 9, + C2H_BT_MP = 11, + C2H_RA_RPT = 12, + + C2H_FW_SWCHNL = 0x10, + C2H_IQK_FINISH = 0x11, + + C2H_EXT_V2 = 0xFF, +}; + 
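/*
 * A minimal sketch (this helper and its name are illustrative
 * assumptions, not part of the patch): with the per-chip C2H enums
 * folded into the shared rtl_c2h_evt_v1 IDs above, common code can
 * dispatch most events itself, using the accessor macros defined just
 * below, and defer only the rate-adaptive report to the chip through
 * the new c2h_ra_report_handler op that replaces c2h_content_parsing.
 */
static void example_c2h_dispatch(struct ieee80211_hw *hw, u8 *c2h, u8 len)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 cmd_id = GET_C2H_CMD_ID(c2h);
	u8 *data = GET_C2H_DATA_PTR(c2h);
	u8 data_len = len - C2H_DATA_OFFSET;

	switch (cmd_id) {
	case C2H_TX_REPORT:
		rtl_tx_report_handler(hw, data, data_len);
		break;
	case C2H_RA_RPT:
		if (rtlpriv->cfg->ops->c2h_ra_report_handler)
			rtlpriv->cfg->ops->c2h_ra_report_handler(hw, data,
								 data_len);
		break;
	default:
		break;	/* BT_INFO/BT_MP would go to the btcoex notifiers */
	}
}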
+enum rtl_c2h_evt_v2 { + C2H_V2_CCX_RPT = 0x0F, +}; + +#define GET_C2H_CMD_ID(c2h) ({u8 *__c2h = c2h; __c2h[0]; }) +#define GET_C2H_SEQ(c2h) ({u8 *__c2h = c2h; __c2h[1]; }) +#define C2H_DATA_OFFSET 2 +#define GET_C2H_DATA_PTR(c2h) ({u8 *__c2h = c2h; &__c2h[C2H_DATA_OFFSET]; }) + #define GET_TX_REPORT_SN_V1(c2h) (c2h[6]) #define GET_TX_REPORT_ST_V1(c2h) (c2h[0] & 0xC0) #define GET_TX_REPORT_RETRY_V1(c2h) (c2h[2] & 0x3F) @@ -1010,6 +1034,29 @@ enum dm_info_query { DM_INFO_SIZE, }; +enum rx_packet_type { + NORMAL_RX, + TX_REPORT1, + TX_REPORT2, + HIS_REPORT, + C2H_PACKET, +}; + +struct rtlwifi_tx_info { + int sn; + unsigned long send_time; +}; + +static inline struct rtlwifi_tx_info *rtl_tx_skb_cb_info(struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + BUILD_BUG_ON(sizeof(struct rtlwifi_tx_info) > + sizeof(info->status.status_driver_data)); + + return (struct rtlwifi_tx_info *)(info->status.status_driver_data); +} + struct octet_string { u8 *octet; u16 length; @@ -1967,6 +2014,7 @@ struct rtl_tx_report { u16 last_sent_sn; unsigned long last_sent_time; u16 last_recv_sn; + struct sk_buff_head queue; }; struct rtl_ps_ctl { @@ -2297,14 +2345,12 @@ struct rtl_hal_ops { void (*set_default_port_id_cmd)(struct ieee80211_hw *hw); bool (*get_btc_status) (void); bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); - u32 (*rx_command_packet)(struct ieee80211_hw *hw, - const struct rtl_stats *status, struct sk_buff *skb); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, struct rtl_wow_pattern *rtl_pattern, u8 index); u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx); - void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len, - u8 *val); + void (*c2h_ra_report_handler)(struct ieee80211_hw *hw, + u8 *cmd_buf, u8 cmd_len); }; struct rtl_intf_ops { @@ -2750,7 +2796,7 @@ struct rtl_priv { struct list_head entry_list; /* c2hcmd list for kthread level access */ - struct list_head c2hcmd_list; + struct sk_buff_head c2hcmd_queue; struct rtl_debug dbg; int max_fw_size; @@ -2842,11 +2888,6 @@ enum bt_co_type { BT_RTL8812A = 11, }; -enum bt_total_ant_num { - ANT_TOTAL_X2 = 0, - ANT_TOTAL_X1 = 1 -}; - enum bt_cur_state { BT_OFF = 0, BT_ON = 1, diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index d055099dadf1..c8ba148f8c6c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -73,6 +73,7 @@ int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg) switch (msg_type) { case COMMON_CARD_READY_IND: rsi_dbg(INFO_ZONE, "common card ready received\n"); + common->hibernate_resume = false; rsi_handle_card_ready(common, msg); break; case SLEEP_NOTIFY_IND: diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 5dafd2e1306c..3644d7d99463 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -411,11 +411,30 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) if ((ieee80211_is_mgmt(wh->frame_control)) || (ieee80211_is_ctl(wh->frame_control)) || (ieee80211_is_qos_nullfunc(wh->frame_control))) { + if (ieee80211_is_assoc_req(wh->frame_control) || + ieee80211_is_reassoc_req(wh->frame_control)) { + struct ieee80211_bss_conf *bss = &vif->bss_conf; + + common->eapol4_confirm = false; + rsi_hal_send_sta_notify_frame(common, + RSI_IFTYPE_STATION, + STA_CONNECTED, bss->bssid, + bss->qos, bss->aid, 0, + vif); + } + q_num = MGMT_SOFT_Q; skb->priority = q_num; + + if 
(rsi_prepare_mgmt_desc(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare desc\n"); + goto xmit_fail; + } } else { if (ieee80211_is_data_qos(wh->frame_control)) { - tid = (skb->data[24] & IEEE80211_QOS_TID); + u8 *qos = ieee80211_get_qos_ctl(wh); + + tid = *qos & IEEE80211_QOS_CTL_TID_MASK; skb->priority = TID_TO_WME_AC(tid); } else { tid = IEEE80211_NONQOS_TID; @@ -433,6 +452,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) if (!rsta) goto xmit_fail; tx_params->sta_id = rsta->sta_id; + } else { + tx_params->sta_id = 0; } if (rsta) { @@ -443,6 +464,14 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + q_num = MGMT_SOFT_Q; + skb->priority = q_num; + } + if (rsi_prepare_data_desc(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare data desc\n"); + goto xmit_fail; + } } if ((q_num < MGMT_SOFT_Q) && @@ -456,7 +485,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) } rsi_core_queue_pkt(common, skb); - rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__); + rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__); rsi_set_event(&common->tx_thread.event); return; diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index de608ae365a4..0761e61591bd 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -45,7 +45,7 @@ int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) return status; } -static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) +int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; struct ieee80211_hdr *wh = NULL; @@ -55,7 +55,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; struct ieee80211_bss_conf *bss = NULL; - struct xtended_desc *xtend_desc = NULL; + struct rsi_xtended_desc *xtend_desc = NULL; u8 header_size; u32 dword_align_bytes = 0; @@ -69,7 +69,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) vif = tx_params->vif; /* Update header size */ - header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Failed to add extended descriptor\n", @@ -92,7 +92,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) wh = (struct ieee80211_hdr *)&skb->data[header_size]; mgmt_desc = (struct rsi_mgmt_desc *)skb->data; - xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); @@ -113,17 +113,6 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) if (conf_is_ht40(conf)) mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); - if (ieee80211_is_probe_req(wh->frame_control)) { - if (!bss->assoc) { - rsi_dbg(INFO_ZONE, - "%s: blocking mgmt queue\n", __func__); - mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; - xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; - common->mgmt_q_block = true; - rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); - } - } - if (ieee80211_is_probe_resp(wh->frame_control)) { mgmt_desc->misc_flags |= (RSI_ADD_DELTA_TSF_VAP_ID | 
RSI_FETCH_RETRY_CNT_FRM_HST); @@ -149,7 +138,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) } /* This function prepares descriptor for given data packet */ -static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) +int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; struct ieee80211_vif *vif; @@ -158,7 +147,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) struct skb_info *tx_params; struct ieee80211_bss_conf *bss; struct rsi_data_desc *data_desc; - struct xtended_desc *xtend_desc; + struct rsi_xtended_desc *xtend_desc; u8 ieee80211_size = MIN_802_11_HDR_LEN; u8 header_size; u8 vap_id = 0; @@ -170,7 +159,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) bss = &vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); return -ENOSPC; @@ -188,7 +177,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc = (struct rsi_data_desc *)skb->data; memset(data_desc, 0, header_size); - xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; wh = (struct ieee80211_hdr *)&skb->data[header_size]; seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl)); @@ -243,6 +232,18 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST; #define EAPOL_RETRY_CNT 15 xtend_desc->retry_cnt = EAPOL_RETRY_CNT; + + if (common->eapol4_confirm) + skb->priority = VO_Q; + else + rsi_set_len_qno(&data_desc->len_qno, + (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + if ((skb->len - header_size) == EAPOL4_PACKET_LEN) { + data_desc->misc_flags |= + RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = EAPOL4_CONFIRM; + } } data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); @@ -282,8 +283,11 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; struct ieee80211_vif *vif; struct ieee80211_tx_info *info; + struct skb_info *tx_params; struct ieee80211_bss_conf *bss; + struct ieee80211_hdr *wh; int status = -EINVAL; + u8 header_size; if (!skb) return 0; @@ -295,16 +299,15 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) goto err; vif = info->control.vif; bss = &vif->bss_conf; + tx_params = (struct skb_info *)info->driver_data; + header_size = tx_params->internal_hdr_size; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; if (((vif->type == NL80211_IFTYPE_STATION) || (vif->type == NL80211_IFTYPE_P2P_CLIENT)) && (!bss->assoc)) goto err; - status = rsi_prepare_data_desc(common, skb); - if (status) - goto err; - status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); @@ -327,12 +330,18 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; + struct ieee80211_bss_conf *bss; + struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; struct skb_info *tx_params; + struct rsi_mgmt_desc *mgmt_desc; + struct rsi_xtended_desc *xtend_desc; int status = -E2BIG; + u8 header_size; info = IEEE80211_SKB_CB(skb); tx_params = 
(struct skb_info *)info->driver_data; + header_size = tx_params->internal_hdr_size; if (tx_params->flags & INTERNAL_MGMT_PKT) { status = adapter->host_intf_ops->write_pkt(common->priv, @@ -346,15 +355,25 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } - if (FRAME_DESC_SZ > skb_headroom(skb)) - goto err; + bss = &info->control.vif->bss_conf; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; + xtend_desc = (struct rsi_xtended_desc *)&skb->data[FRAME_DESC_SZ]; + + /* Indicate to firmware to give cfm for probe */ + if (ieee80211_is_probe_req(wh->frame_control) && !bss->assoc) { + rsi_dbg(INFO_ZONE, + "%s: blocking mgmt queue\n", __func__); + mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; + common->mgmt_q_block = true; + rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); + } - rsi_prepare_mgmt_desc(common, skb); status = rsi_send_pkt_to_bus(common, skb); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); -err: rsi_indicate_tx_status(common->priv, skb, status); return status; } @@ -616,28 +635,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, u32 content_size) { struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops; - struct bl_header bl_hdr; + struct bl_header *bl_hdr; u32 write_addr, write_len; int status; - bl_hdr.flags = 0; - bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode); - bl_hdr.check_sum = cpu_to_le32( - *(u32 *)&flash_content[CHECK_SUM_OFFSET]); - bl_hdr.flash_start_address = cpu_to_le32( - *(u32 *)&flash_content[ADDR_OFFSET]); - bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); + bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL); + if (!bl_hdr) + return -ENOMEM; + + bl_hdr->flags = 0; + bl_hdr->image_no = cpu_to_le32(adapter->priv->coex_mode); + bl_hdr->check_sum = + cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]); + bl_hdr->flash_start_address = + cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]); + bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); write_len = sizeof(struct bl_header); if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) { write_addr = PING_BUFFER_ADDRESS; status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } else { write_addr = PING_BUFFER_ADDRESS >> 16; @@ -646,20 +669,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return status; + goto fail; } write_addr = RSI_SD_REQUEST_MASTER | (PING_BUFFER_ADDRESS & 0xFFFF); status = hif_ops->write_reg_multiple(adapter, write_addr, - (u8 *)&bl_hdr, write_len); + (u8 *)bl_hdr, write_len); if (status < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to load Version/CRC structure\n", __func__); - return status; + goto fail; } } - return 0; + status = 0; +fail: + kfree(bl_hdr); + return status; } static u32 read_flash_capacity(struct rsi_hw *adapter) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 32f5cb46fd4f..2ca7464b7fa3 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -245,6 +245,7 @@ void rsi_mac80211_detach(struct rsi_hw *adapter) ieee80211_stop_queues(hw); ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); 
+ adapter->hw = NULL; } for (band = 0; band < NUM_NL80211_BANDS; band++) { @@ -614,7 +615,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, /* Power save parameters */ if (changed & IEEE80211_CONF_CHANGE_PS) { - struct ieee80211_vif *vif; + struct ieee80211_vif *vif, *sta_vif = NULL; unsigned long flags; int i, set_ps = 1; @@ -628,13 +629,17 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, set_ps = 0; break; } + if ((vif->type == NL80211_IFTYPE_STATION || + vif->type == NL80211_IFTYPE_P2P_CLIENT) && + (!sta_vif || vif->bss_conf.assoc)) + sta_vif = vif; } - if (set_ps) { + if (set_ps && sta_vif) { spin_lock_irqsave(&adapter->ps_lock, flags); if (conf->flags & IEEE80211_CONF_PS) - rsi_enable_ps(adapter, vif); + rsi_enable_ps(adapter, sta_vif); else - rsi_disable_ps(adapter, vif); + rsi_disable_ps(adapter, sta_vif); spin_unlock_irqrestore(&adapter->ps_lock, flags); } } @@ -737,7 +742,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid, bss_conf->qos, bss_conf->aid, - NULL, 0, vif); + NULL, 0, + bss_conf->assoc_capability, vif); adapter->ps_info.dtim_interval_duration = bss->dtim_period; adapter->ps_info.listen_interval = conf->listen_interval; @@ -906,14 +912,25 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, } } - return rsi_hal_load_key(adapter->priv, - key->key, - key->keylen, - key_type, - key->keyidx, - key->cipher, - sta_id, - vif); + status = rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + key_type, + key->keyidx, + key->cipher, + sta_id, + vif); + if (status) + return status; + + if (vif->type == NL80211_IFTYPE_STATION && key->key && + (key->cipher == WLAN_CIPHER_SUITE_WEP104 || + key->cipher == WLAN_CIPHER_SUITE_WEP40)) { + if (!rsi_send_block_unblock_frame(adapter->priv, false)) + adapter->priv->hw_data_qs_blocked = false; + } + + return 0; } /** @@ -1086,7 +1103,7 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, break; default: - rsi_dbg(ERR_ZONE, "%s: Uknown AMPDU action\n", __func__); + rsi_dbg(ERR_ZONE, "%s: Unknown AMPDU action\n", __func__); break; } @@ -1391,7 +1408,7 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); rsi_inform_bss_status(common, RSI_OPMODE_AP, 1, sta->addr, sta->wme, sta->aid, - sta, sta_idx, vif); + sta, sta_idx, 0, vif); if (common->key) { struct ieee80211_key_conf *key = common->key; @@ -1469,7 +1486,7 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, rsi_inform_bss_status(common, RSI_OPMODE_AP, 0, sta->addr, sta->wme, sta->aid, sta, sta_idx, - vif); + 0, vif); rsta->sta = NULL; rsta->sta_id = -1; for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) @@ -1788,15 +1805,21 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) struct rsi_common *common = adapter->priv; u16 triggers = 0; u16 rx_filter_word = 0; - struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf; + struct ieee80211_bss_conf *bss = NULL; rsi_dbg(INFO_ZONE, "Config WoWLAN to device\n"); + if (!adapter->vifs[0]) + return -EINVAL; + + bss = &adapter->vifs[0]->bss_conf; + if (WARN_ON(!wowlan)) { rsi_dbg(ERR_ZONE, "WoW triggers not enabled\n"); return -EINVAL; } + common->wow_flags |= RSI_WOW_ENABLED; triggers = rsi_wow_map_triggers(common, wowlan); if (!triggers) { rsi_dbg(ERR_ZONE, "%s:No valid WoW triggers\n", __func__); @@ -1819,7 +1842,6 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) rx_filter_word = (ALLOW_DATA_ASSOC_PEER | DISALLOW_BEACONS); 
rsi_send_rx_filter_frame(common, rx_filter_word); - common->wow_flags |= RSI_WOW_ENABLED; return 0; } @@ -1939,9 +1961,8 @@ int rsi_mac80211_attach(struct rsi_common *common) hw->uapsd_queues = RSI_IEEE80211_UAPSD_QUEUES; hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; - hw->max_tx_aggregation_subframes = 6; - rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); - rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); + hw->max_tx_aggregation_subframes = RSI_MAX_TX_AGGR_FRMS; + hw->max_rx_aggregation_subframes = RSI_MAX_RX_AGGR_FRMS; hw->rate_control_algorithm = "AARF"; SET_IEEE80211_PERM_ADDR(hw, common->mac_addr); @@ -1962,10 +1983,15 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->available_antennas_rx = 1; wiphy->available_antennas_tx = 1; + + rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); wiphy->bands[NL80211_BAND_2GHZ] = &adapter->sbands[NL80211_BAND_2GHZ]; - wiphy->bands[NL80211_BAND_5GHZ] = - &adapter->sbands[NL80211_BAND_5GHZ]; + if (common->num_supp_bands > 1) { + rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ); + wiphy->bands[NL80211_BAND_5GHZ] = + &adapter->sbands[NL80211_BAND_5GHZ]; + } /* AP Parameters */ wiphy->max_ap_assoc_sta = rsi_max_ap_stas[common->oper_mode - 1]; @@ -1991,6 +2017,9 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->iface_combinations = rsi_iface_combinations; wiphy->n_iface_combinations = ARRAY_SIZE(rsi_iface_combinations); + if (common->coex_mode > 1) + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + status = ieee80211_register_hw(hw); if (status) return status; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index c21fca750fd4..d0e5937cad6d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -325,8 +325,8 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->channel_num = common->channel; radio_caps->rf_model = RSI_RF_TYPE; + radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; if (common->channel_width == BW_40MHZ) { - radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { @@ -454,14 +454,10 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, * * Return: status: 0 on success, corresponding negative error code on failure. 
*/ -static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, - enum opmode opmode, - u8 notify_event, - const unsigned char *bssid, - u8 qos_enable, - u16 aid, - u16 sta_id, - struct ieee80211_vif *vif) +int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, + u8 notify_event, const unsigned char *bssid, + u8 qos_enable, u16 aid, u16 sta_id, + struct ieee80211_vif *vif) { struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; @@ -1194,6 +1190,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common, return -ENOMEM; } + memset(skb->data, 0, frame_len); selected_rates = kzalloc(2 * RSI_TBL_SZ, GFP_KERNEL); if (!selected_rates) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", @@ -1328,6 +1325,7 @@ void rsi_inform_bss_status(struct rsi_common *common, u16 aid, struct ieee80211_sta *sta, u16 sta_id, + u16 assoc_cap, struct ieee80211_vif *vif) { if (status) { @@ -1342,10 +1340,10 @@ void rsi_inform_bss_status(struct rsi_common *common, vif); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common, sta, sta_id, vif); - if (opmode == RSI_OPMODE_STA) { - if (!rsi_send_block_unblock_frame(common, false)) - common->hw_data_qs_blocked = false; - } + if (opmode == RSI_OPMODE_STA && + !(assoc_cap & WLAN_CAPABILITY_PRIVACY) && + !rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; } else { if (opmode == RSI_OPMODE_STA) common->hw_data_qs_blocked = true; @@ -1850,10 +1848,19 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) __func__); return rsi_handle_card_ready(common, msg); case TX_STATUS_IND: - if (msg[15] == PROBEREQ_CONFIRM) { + switch (msg[RSI_TX_STATUS_TYPE]) { + case PROBEREQ_CONFIRM: common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); + break; + case EAPOL4_CONFIRM: + if (msg[RSI_TX_STATUS]) { + common->eapol4_confirm = true; + if (!rsi_send_block_unblock_frame(common, + false)) + common->hw_data_qs_blocked = false; + } } break; case BEACON_EVENT_IND: diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index d76e69c0beaa..416981d99229 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -170,7 +170,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) int err; struct mmc_card *card = pfunction->card; struct mmc_host *host = card->host; - s32 bit = (fls(host->ocr_avail) - 1); u8 cmd52_resp; u32 clock, resp, i; u16 rca; @@ -190,7 +189,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) msleep(20); /* Initialize the SDIO card */ - host->ios.vdd = bit; host->ios.chip_select = MMC_CS_DONTCARE; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.power_mode = MMC_POWER_UP; @@ -660,8 +658,6 @@ static int rsi_sdio_master_reg_read(struct rsi_hw *adapter, u32 addr, if (!data) return -ENOMEM; - data = PTR_ALIGN(data, 8); - ms_addr = (addr >> 16); status = rsi_sdio_master_access_msword(adapter, ms_addr); if (status < 0) { @@ -724,8 +720,6 @@ static int rsi_sdio_master_reg_write(struct rsi_hw *adapter, if (!data_aligned) return -ENOMEM; - data_aligned = PTR_ALIGN(data_aligned, 8); - if (size == 2) { *data_aligned = ((data << 16) | (data & 0xFFFF)); } else if (size == 1) { @@ -1042,17 +1036,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, /*This function resets and re-initializes the chip.*/ static void rsi_reset_chip(struct rsi_hw *adapter) { - __le32 data; + u8 *data; u8 sdio_interrupt_status = 0; u8 request = 1; int 
ret; + data = kzalloc(sizeof(u32), GFP_KERNEL); + if (!data) + return; + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to write SDIO wakeup register\n", __func__); - return; + goto err; } msleep(20); ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, @@ -1060,7 +1058,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter) if (ret < 0) { rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - return; + goto err; } rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", __func__, sdio_interrupt_status); @@ -1070,17 +1068,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Unable to set ms word to common reg\n", __func__); - return; + goto err; } - data = TA_HOLD_THREAD_VALUE; + put_unaligned_le32(TA_HOLD_THREAD_VALUE, data); if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER, - (u8 *)&data, 4)) { + data, 4)) { rsi_dbg(ERR_ZONE, "%s: Unable to hold Thread-Arch processor threads\n", __func__); - return; + goto err; } /* This msleep will ensure Thread-Arch processor to go to hold @@ -1101,6 +1099,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter) * read write operations to complete for chip reset. */ msleep(500); +err: + kfree(data); + return; } /** diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 7b8bae313aa9..6ce6b754df12 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -687,6 +687,14 @@ static int rsi_reset_card(struct rsi_hw *adapter) */ msleep(100); + ret = rsi_usb_master_reg_write(adapter, SWBL_REGOUT, + RSI_FW_WDT_DISABLE_REQ, + RSI_COMMON_REG_SIZE); + if (ret < 0) { + rsi_dbg(ERR_ZONE, "Disabling firmware watchdog timer failed\n"); + goto fail; + } + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32); if (ret < 0) diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h index 238ee96434ec..ad903b22440e 100644 --- a/drivers/net/wireless/rsi/rsi_boot_params.h +++ b/drivers/net/wireless/rsi/rsi_boot_params.h @@ -46,7 +46,8 @@ (((TA_PLL_M_VAL_20 + 1) * 40) / \ ((TA_PLL_N_VAL_20 + 1) * (TA_PLL_P_VAL_20 + 1))) #define VALID_20 \ - (WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | WIFI_SWITCH_CLK_CONFIGS) + (WIFI_TAPLL_CONFIGS | WIFI_PLL960_CONFIGS | WIFI_AFEPLL_CONFIGS | \ + WIFI_SWITCH_CLK_CONFIGS | BOOTUP_MODE_INFO | CRYSTAL_GOOD_TIME) #define UMAC_CLK_40BW \ (((TA_PLL_M_VAL_40 + 1) * 40) / \ ((TA_PLL_N_VAL_40 + 1) * (TA_PLL_P_VAL_40 + 1))) diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 786dccd0b732..327638cdd30b 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -115,6 +115,7 @@ #define FW_FLASH_OFFSET 0x820 #define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200) #define MAX_DWORD_ALIGN_BYTES 64 +#define RSI_COMMON_REG_SIZE 2 struct bl_header { __le32 flags; @@ -167,6 +168,8 @@ struct rsi_bt_desc { } __packed; int rsi_hal_device_init(struct rsi_hw *adapter); +int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb); +int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb); int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb); int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb); int rsi_send_bt_pkt(struct rsi_common *common, struct sk_buff *skb); 
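Taken together, the rsi hunks above move host-descriptor preparation out of the bus-send functions and into rsi_core_xmit(), so every frame is fully described before it is queued, and EAPOL data frames are steered onto the management soft queue until the firmware confirms the fourth handshake frame (EAPOL4_CONFIRM). A minimal sketch of the resulting ordering, assuming only the helpers and queue constants visible in the hunks above (TID handling and the real error path trimmed):

static void example_rsi_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct ieee80211_hdr *wh = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(wh->frame_control)) {
		skb->priority = MGMT_SOFT_Q;
		if (rsi_prepare_mgmt_desc(common, skb))	/* now exported */
			goto drop;
	} else {
		/* EAPOL is sent through the management soft queue */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			skb->priority = MGMT_SOFT_Q;
		if (rsi_prepare_data_desc(common, skb))	/* now exported */
			goto drop;
	}
	rsi_core_queue_pkt(common, skb);
	return;
drop:
	dev_kfree_skb(skb);
}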
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index ef4fa323694b..a084f224bb03 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -190,12 +190,6 @@ struct cqm_info { u32 rssi_hyst; }; -struct xtended_desc { - u8 confirm_frame_type; - u8 retry_cnt; - u16 reserved; -}; - enum rsi_dfs_regions { RSI_REGION_FCC = 0, RSI_REGION_ETSI, @@ -293,6 +287,7 @@ struct rsi_common { struct timer_list roc_timer; struct ieee80211_vif *roc_vif; + bool eapol4_confirm; void *bt_adapter; }; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index cf6567ae5bbe..14620935c925 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -33,6 +33,7 @@ #define WMM_SHORT_SLOT_TIME 9 #define SIFS_DURATION 16 +#define EAPOL4_PACKET_LEN 0x85 #define KEY_TYPE_CLEAR 0 #define RSI_PAIRWISE_KEY 1 #define RSI_GROUP_KEY 2 @@ -62,9 +63,12 @@ #define RX_DOT11_MGMT 0x02 #define TX_STATUS_IND 0x04 #define BEACON_EVENT_IND 0x08 +#define EAPOL4_CONFIRM 1 #define PROBEREQ_CONFIRM 2 #define CARD_READY_IND 0x00 #define SLEEP_NOTIFY_IND 0x06 +#define RSI_TX_STATUS_TYPE 15 +#define RSI_TX_STATUS 12 #define RSI_DELETE_PEER 0x0 #define RSI_ADD_PEER 0x1 @@ -221,6 +225,9 @@ #define RSI_WOW_DISCONNECT BIT(5) #endif +#define RSI_MAX_TX_AGGR_FRMS 8 +#define RSI_MAX_RX_AGGR_FRMS 8 + enum opmode { RSI_OPMODE_UNSUPPORTED = -1, RSI_OPMODE_AP = 0, @@ -301,6 +308,12 @@ struct rsi_mac_frame { #define ENCAP_MGMT_PKT BIT(7) #define DESC_IMMEDIATE_WAKEUP BIT(15) +struct rsi_xtended_desc { + u8 confirm_frame_type; + u8 retry_cnt; + u16 reserved; +}; + struct rsi_cmd_desc_dword0 { __le16 len_qno; u8 frame_type; @@ -654,10 +667,14 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); int rsi_send_vap_dynamic_update(struct rsi_common *common); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); +int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode, + u8 notify_event, const unsigned char *bssid, + u8 qos_enable, u16 aid, u16 sta_id, + struct ieee80211_vif *vif); void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, u8 status, const u8 *addr, u8 qos_enable, u16 aid, struct ieee80211_sta *sta, u16 sta_id, - struct ieee80211_vif *vif); + u16 assoc_cap, struct ieee80211_vif *vif); void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb); int rsi_mac80211_attach(struct rsi_common *common); void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb, diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index ead8e7c4df3a..353dbdf31e75 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -87,7 +87,7 @@ enum sdio_interrupt_type { #define TA_SOFT_RST_CLR 0 #define TA_SOFT_RST_SET BIT(0) #define TA_PC_ZERO 0 -#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF) +#define TA_HOLD_THREAD_VALUE 0xF #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF) #define TA_BASE_ADDR 0x2200 #define MISC_CFG_BASE_ADDR 0x4105 diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index a88d59295a98..b6fe79f0a513 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -26,6 +26,7 @@ #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 #define RSI_TA_HOLD_REG 0x22000844 +#define RSI_FW_WDT_DISABLE_REQ 0x69 #define USB_VENDOR_REGISTER_READ 0x15 #define 
USB_VENDOR_REGISTER_WRITE 0x16 diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c index e9050b41157a..f7b1b0062db3 100644 --- a/drivers/net/wireless/st/cw1200/txrx.c +++ b/drivers/net/wireless/st/cw1200/txrx.c @@ -1069,7 +1069,7 @@ void cw1200_rx_cb(struct cw1200_common *priv, } if (skb->len < sizeof(struct ieee80211_pspoll)) { - wiphy_warn(priv->hw->wiphy, "Mailformed SDU rx'ed. Size is lesser than IEEE header.\n"); + wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than IEEE header.\n"); goto drop; } diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 1f727babbea0..750bea3574ee 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -155,47 +155,40 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) struct mmc_card *card = func->card; ret = pm_runtime_get_sync(&card->dev); - if (ret) { - /* - * Runtime PM might be temporarily disabled, or the device - * might have a positive reference counter. Make sure it is - * really powered on. - */ - ret = mmc_power_restore_host(card->host); - if (ret < 0) { - pm_runtime_put_sync(&card->dev); - goto out; - } + if (ret < 0) { + pm_runtime_put_noidle(&card->dev); + dev_err(glue->dev, "%s: failed to get_sync(%d)\n", + __func__, ret); + + return ret; } sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); -out: - return ret; + return 0; } static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { - int ret; struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; + int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); - /* Power off the card manually in case it wasn't powered off above */ - ret = mmc_power_save_host(card->host); - if (ret < 0) - goto out; - /* Let runtime PM know the card is powered off */ - pm_runtime_put_sync(&card->dev); + error = pm_runtime_put(&card->dev); + if (error < 0 && error != -EBUSY) { + dev_err(&card->dev, "%s failed: %i\n", __func__, error); -out: - return ret; + return error; + } + + return 0; } static int wl12xx_sdio_set_power(struct device *child, bool enable) @@ -406,6 +399,11 @@ static int wl1271_suspend(struct device *dev) mmc_pm_flag_t sdio_flags; int ret = 0; + if (!wl) { + dev_err(dev, "no wilink module was probed\n"); + goto out; + } + dev_dbg(dev, "wl1271 suspend. 
wow_enabled: %d\n", wl->wow_enabled); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 8c0c92712fc9..d963baf8e53a 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -204,6 +204,9 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) bool scanphys = false; int addr, rc; + if (!np) + return mdiobus_register(mdio); + /* Do not continue if the node is disabled */ if (!of_device_is_available(np)) return -ENODEV; diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index a0d522154cdf..4ef429250d7b 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -135,19 +135,25 @@ struct mvebu_comhy_conf { static const struct mvebu_comhy_conf mvebu_comphy_cp110_modes[] = { /* lane 0 */ MVEBU_COMPHY_CONF(0, 1, PHY_MODE_SGMII, 0x1), + MVEBU_COMPHY_CONF(0, 1, PHY_MODE_2500SGMII, 0x1), /* lane 1 */ MVEBU_COMPHY_CONF(1, 2, PHY_MODE_SGMII, 0x1), + MVEBU_COMPHY_CONF(1, 2, PHY_MODE_2500SGMII, 0x1), /* lane 2 */ MVEBU_COMPHY_CONF(2, 0, PHY_MODE_SGMII, 0x1), + MVEBU_COMPHY_CONF(2, 0, PHY_MODE_2500SGMII, 0x1), MVEBU_COMPHY_CONF(2, 0, PHY_MODE_10GKR, 0x1), /* lane 3 */ MVEBU_COMPHY_CONF(3, 1, PHY_MODE_SGMII, 0x2), + MVEBU_COMPHY_CONF(3, 1, PHY_MODE_2500SGMII, 0x2), /* lane 4 */ MVEBU_COMPHY_CONF(4, 0, PHY_MODE_SGMII, 0x2), + MVEBU_COMPHY_CONF(4, 0, PHY_MODE_2500SGMII, 0x2), MVEBU_COMPHY_CONF(4, 0, PHY_MODE_10GKR, 0x2), MVEBU_COMPHY_CONF(4, 1, PHY_MODE_SGMII, 0x1), /* lane 5 */ MVEBU_COMPHY_CONF(5, 2, PHY_MODE_SGMII, 0x1), + MVEBU_COMPHY_CONF(5, 2, PHY_MODE_2500SGMII, 0x1), }; struct mvebu_comphy_priv { @@ -206,6 +212,10 @@ static void mvebu_comphy_ethernet_init_reset(struct mvebu_comphy_lane *lane, if (mode == PHY_MODE_10GKR) val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0xe) | MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0xe); + else if (mode == PHY_MODE_2500SGMII) + val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x8) | + MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x8) | + MVEBU_COMPHY_SERDES_CFG0_HALF_BUS; else if (mode == PHY_MODE_SGMII) val |= MVEBU_COMPHY_SERDES_CFG0_GEN_RX(0x6) | MVEBU_COMPHY_SERDES_CFG0_GEN_TX(0x6) | @@ -296,13 +306,13 @@ static int mvebu_comphy_init_plls(struct mvebu_comphy_lane *lane, return 0; } -static int mvebu_comphy_set_mode_sgmii(struct phy *phy) +static int mvebu_comphy_set_mode_sgmii(struct phy *phy, enum phy_mode mode) { struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); struct mvebu_comphy_priv *priv = lane->priv; u32 val; - mvebu_comphy_ethernet_init_reset(lane, PHY_MODE_SGMII); + mvebu_comphy_ethernet_init_reset(lane, mode); val = readl(priv->base + MVEBU_COMPHY_RX_CTRL1(lane->id)); val &= ~MVEBU_COMPHY_RX_CTRL1_CLK8T_EN; @@ -487,7 +497,8 @@ static int mvebu_comphy_power_on(struct phy *phy) switch (lane->mode) { case PHY_MODE_SGMII: - ret = mvebu_comphy_set_mode_sgmii(phy); + case PHY_MODE_2500SGMII: + ret = mvebu_comphy_set_mode_sgmii(phy, lane->mode); break; case PHY_MODE_10GKR: ret = mvebu_comphy_set_mode_10gkr(phy); diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index a21ad10d613c..474c988d2e95 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -41,19 +41,19 @@ config PTP_1588_CLOCK_DTE To compile this driver as a module, choose M here: the module will be called ptp_dte. 
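The ptp_qoriq driver renamed and introduced below registers the Freescale QorIQ 1588 timer with the PTP clock framework, which exposes it to userspace as a posix dynamic clock. As a hedged illustration of how such a clock is consumed (the /dev/ptp0 node is an assumption about the target system, not part of this patch; FD_TO_CLOCKID is the standard encoding from the kernel's PTP documentation):

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Standard posix dynamic-clock encoding used by the PTP subsystem */
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
	struct timespec ts;
	int fd = open("/dev/ptp0", O_RDONLY);	/* device node assumed */

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("ptp time: %lld.%09ld\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	close(fd);
	return 0;
}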
-config PTP_1588_CLOCK_GIANFAR - tristate "Freescale eTSEC as PTP clock" +config PTP_1588_CLOCK_QORIQ + tristate "Freescale QorIQ 1588 timer as PTP clock" depends on GIANFAR depends on PTP_1588_CLOCK default y help - This driver adds support for using the eTSEC as a PTP - clock. This clock is only useful if your PTP programs are - getting hardware time stamps on the PTP Ethernet packets - using the SO_TIMESTAMPING API. + This driver adds support for using the Freescale QorIQ 1588 + timer as a PTP clock. This clock is only useful if your PTP + programs are getting hardware time stamps on the PTP Ethernet + packets using the SO_TIMESTAMPING API. To compile this driver as a module, choose M here: the module - will be called gianfar_ptp. + will be called ptp_qoriq. config PTP_1588_CLOCK_IXP46X tristate "Intel IXP46x as PTP clock" diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index fd28207f5379..19efa9cfa950 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o +obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index b3285175f20f..78ccf936d356 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -452,7 +452,6 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta) static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs __iomem *regs = pch_dev->regs; @@ -461,8 +460,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) ns = pch_systime_read(regs); spin_unlock_irqrestore(&pch_dev->register_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -474,8 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp, struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps); struct pch_ts_regs __iomem *regs = pch_dev->regs; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(&pch_dev->register_lock, flags); pch_systime_write(regs, ns); diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c new file mode 100644 index 000000000000..1468a1642b49 --- /dev/null +++ b/drivers/ptp/ptp_qoriq.c @@ -0,0 +1,454 @@ +/* + * PTP 1588 clock for Freescale QorIQ 1588 timer + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/device.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/timex.h> +#include <linux/slab.h> + +#include <linux/fsl/ptp_qoriq.h> + +/* + * Register access functions + */ + +/* Caller must hold qoriq_ptp->lock. */ +static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) +{ + u64 ns; + u32 lo, hi; + + lo = qoriq_read(&qoriq_ptp->regs->tmr_cnt_l); + hi = qoriq_read(&qoriq_ptp->regs->tmr_cnt_h); + ns = ((u64) hi) << 32; + ns |= lo; + return ns; +} + +/* Caller must hold qoriq_ptp->lock. */ +static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) +{ + u32 hi = ns >> 32; + u32 lo = ns & 0xffffffff; + + qoriq_write(&qoriq_ptp->regs->tmr_cnt_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_cnt_h, hi); +} + +/* Caller must hold qoriq_ptp->lock. */ +static void set_alarm(struct qoriq_ptp *qoriq_ptp) +{ + u64 ns; + u32 lo, hi; + + ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL; + ns = div_u64(ns, 1000000000UL) * 1000000000ULL; + ns -= qoriq_ptp->tclk_period; + hi = ns >> 32; + lo = ns & 0xffffffff; + qoriq_write(&qoriq_ptp->regs->tmr_alarm1_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_alarm1_h, hi); +} + +/* Caller must hold qoriq_ptp->lock. */ +static void set_fipers(struct qoriq_ptp *qoriq_ptp) +{ + set_alarm(qoriq_ptp); + qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); +} + +/* + * Interrupt service routine + */ + +static irqreturn_t isr(int irq, void *priv) +{ + struct qoriq_ptp *qoriq_ptp = priv; + struct ptp_clock_event event; + u64 ns; + u32 ack = 0, lo, hi, mask, val; + + val = qoriq_read(&qoriq_ptp->regs->tmr_tevent); + + if (val & ETS1) { + ack |= ETS1; + hi = qoriq_read(&qoriq_ptp->regs->tmr_etts1_h); + lo = qoriq_read(&qoriq_ptp->regs->tmr_etts1_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (val & ETS2) { + ack |= ETS2; + hi = qoriq_read(&qoriq_ptp->regs->tmr_etts2_h); + lo = qoriq_read(&qoriq_ptp->regs->tmr_etts2_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (val & ALM2) { + ack |= ALM2; + if (qoriq_ptp->alarm_value) { + event.type = PTP_CLOCK_ALARM; + event.index = 0; + event.timestamp = qoriq_ptp->alarm_value; + ptp_clock_event(qoriq_ptp->clock, &event); + } + if (qoriq_ptp->alarm_interval) { + ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval; + hi = ns >> 32; + lo = ns & 0xffffffff; + spin_lock(&qoriq_ptp->lock); + qoriq_write(&qoriq_ptp->regs->tmr_alarm2_l, lo); + qoriq_write(&qoriq_ptp->regs->tmr_alarm2_h, hi); + spin_unlock(&qoriq_ptp->lock); + qoriq_ptp->alarm_value = ns; + } else { + qoriq_write(&qoriq_ptp->regs->tmr_tevent, ALM2); + spin_lock(&qoriq_ptp->lock); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + mask &= ~ALM2EN; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock(&qoriq_ptp->lock); + qoriq_ptp->alarm_value = 0; + qoriq_ptp->alarm_interval = 0; + } + } + + if (val & PP1) { + ack |= PP1; + event.type = PTP_CLOCK_PPS; + ptp_clock_event(qoriq_ptp->clock, &event); + } + + if (ack) { + qoriq_write(&qoriq_ptp->regs->tmr_tevent, ack); + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +/* + * PTP clock 
operations + */ + +static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + u64 adj, diff; + u32 tmr_add; + int neg_adj = 0; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + if (scaled_ppm < 0) { + neg_adj = 1; + scaled_ppm = -scaled_ppm; + } + tmr_add = qoriq_ptp->tmr_add; + adj = tmr_add; + + /* calculate diff as adj*(scaled_ppm/65536)/1000000 + * and round() to the nearest integer + */ + adj *= scaled_ppm; + diff = div_u64(adj, 8000000); + diff = (diff >> 13) + ((diff >> 12) & 1); + + tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff; + + qoriq_write(&qoriq_ptp->regs->tmr_add, tmr_add); + + return 0; +} + +static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + now = tmr_cnt_read(qoriq_ptp); + now += delta; + tmr_cnt_write(qoriq_ptp, now); + set_fipers(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + return 0; +} + +static int ptp_qoriq_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + ns = tmr_cnt_read(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int ptp_qoriq_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + tmr_cnt_write(qoriq_ptp, ns); + set_fipers(qoriq_ptp); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + return 0; +} + +static int ptp_qoriq_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + unsigned long flags; + u32 bit, mask; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + switch (rq->extts.index) { + case 0: + bit = ETS1EN; + break; + case 1: + bit = ETS2EN; + break; + default: + return -EINVAL; + } + spin_lock_irqsave(&qoriq_ptp->lock, flags); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + if (on) + mask |= bit; + else + mask &= ~bit; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&qoriq_ptp->lock, flags); + mask = qoriq_read(&qoriq_ptp->regs->tmr_temask); + if (on) + mask |= PP1EN; + else + mask &= ~PP1EN; + qoriq_write(&qoriq_ptp->regs->tmr_temask, mask); + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static const struct ptp_clock_info ptp_qoriq_caps = { + .owner = THIS_MODULE, + .name = "qoriq ptp clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = N_EXT_TS, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int qoriq_ptp_probe(struct platform_device *dev) +{ + struct device_node *node = dev->dev.of_node; + struct qoriq_ptp *qoriq_ptp; + struct timespec64 now; + int err = -ENOMEM; + u32 tmr_ctrl; + unsigned long flags; + + qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); + if (!qoriq_ptp) + 
goto no_memory; + + err = -ENODEV; + + qoriq_ptp->caps = ptp_qoriq_caps; + + if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel)) + qoriq_ptp->cksel = DEFAULT_CKSEL; + + if (of_property_read_u32(node, + "fsl,tclk-period", &qoriq_ptp->tclk_period) || + of_property_read_u32(node, + "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) || + of_property_read_u32(node, + "fsl,tmr-add", &qoriq_ptp->tmr_add) || + of_property_read_u32(node, + "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) || + of_property_read_u32(node, + "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) || + of_property_read_u32(node, + "fsl,max-adj", &qoriq_ptp->caps.max_adj)) { + pr_err("device tree node missing required elements\n"); + goto no_node; + } + + qoriq_ptp->irq = platform_get_irq(dev, 0); + + if (qoriq_ptp->irq < 0) { + pr_err("irq not in device tree\n"); + goto no_node; + } + if (request_irq(qoriq_ptp->irq, isr, 0, DRIVER, qoriq_ptp)) { + pr_err("request_irq failed\n"); + goto no_node; + } + + qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!qoriq_ptp->rsrc) { + pr_err("no resource\n"); + goto no_resource; + } + if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) { + pr_err("resource busy\n"); + goto no_resource; + } + + spin_lock_init(&qoriq_ptp->lock); + + qoriq_ptp->regs = ioremap(qoriq_ptp->rsrc->start, + resource_size(qoriq_ptp->rsrc)); + if (!qoriq_ptp->regs) { + pr_err("ioremap ptp registers failed\n"); + goto no_ioremap; + } + getnstimeofday64(&now); + ptp_qoriq_settime(&qoriq_ptp->caps, &now); + + tmr_ctrl = + (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_irqsave(&qoriq_ptp->lock, flags); + + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl); + qoriq_write(&qoriq_ptp->regs->tmr_add, qoriq_ptp->tmr_add); + qoriq_write(&qoriq_ptp->regs->tmr_prsc, qoriq_ptp->tmr_prsc); + qoriq_write(&qoriq_ptp->regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); + qoriq_write(&qoriq_ptp->regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + set_alarm(qoriq_ptp); + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); + + spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + + qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev); + if (IS_ERR(qoriq_ptp->clock)) { + err = PTR_ERR(qoriq_ptp->clock); + goto no_clock; + } + qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock); + + platform_set_drvdata(dev, qoriq_ptp); + + return 0; + +no_clock: + iounmap(qoriq_ptp->regs); +no_ioremap: + release_resource(qoriq_ptp->rsrc); +no_resource: + free_irq(qoriq_ptp->irq, qoriq_ptp); +no_node: + kfree(qoriq_ptp); +no_memory: + return err; +} + +static int qoriq_ptp_remove(struct platform_device *dev) +{ + struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); + + qoriq_write(&qoriq_ptp->regs->tmr_temask, 0); + qoriq_write(&qoriq_ptp->regs->tmr_ctrl, 0); + + ptp_clock_unregister(qoriq_ptp->clock); + iounmap(qoriq_ptp->regs); + release_resource(qoriq_ptp->rsrc); + free_irq(qoriq_ptp->irq, qoriq_ptp); + kfree(qoriq_ptp); + + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, +}; +MODULE_DEVICE_TABLE(of, match_table); + +static struct platform_driver qoriq_ptp_driver = { + .driver = { + .name = "ptp_qoriq", + .of_match_table = match_table, + }, + .probe = qoriq_ptp_probe, + .remove = qoriq_ptp_remove, +}; + +module_platform_driver(qoriq_ptp_driver); + +MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); +MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer"); 
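The rounding trick in ptp_qoriq_adjfine() above is worth unpacking: the target is diff = round(tmr_add * scaled_ppm / (65536 * 10^6)), and since 8000000 << 13 equals 65536 * 10^6, the driver divides by 8000000, shifts right by 13, and uses bit 12 of the intermediate quotient as the rounding bit. A standalone userspace check of that identity, with example values that are assumptions rather than figures from any real board:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tmr_add = 0x80000000ULL; /* example addend */
	int64_t scaled_ppm = 32768;       /* +0.5 ppm in 16.16 fixed point */
	uint64_t adj = tmr_add * (uint64_t)scaled_ppm;
	uint64_t diff = adj / 8000000;    /* stands in for div_u64() */

	diff = (diff >> 13) + ((diff >> 12) & 1);
	/* 2^31 * 0.5e-6 = 1073.74..., so this prints 1074. */
	printf("diff = %llu\n", (unsigned long long)diff);
	return 0;
}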
+MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0ee8f33efb54..2d9fe7e4ee40 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1928,6 +1928,8 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char return -EINVAL; /* TODO: sanity checks */ card->portno = value; + if (card->dev) + card->dev->dev_port = card->portno; return count; @@ -2158,6 +2160,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) card->dev = dev; card->dev->ml_priv = card; card->dev->netdev_ops = &lcs_netdev_ops; + card->dev->dev_port = card->portno; memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); #ifdef CONFIG_IP_MULTICAST if (!lcs_check_multicast_support(card)) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 78b98b3e7efa..2a5fec55bf60 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -148,6 +148,7 @@ struct qeth_perf_stats { unsigned int tx_csum; unsigned int tx_lin; unsigned int tx_linfail; + unsigned int rx_csum; }; /* Routing stuff */ @@ -712,9 +713,6 @@ enum qeth_discipline_id { struct qeth_discipline { const struct device_type *devtype; - void (*start_poll)(struct ccw_device *, int, unsigned long); - qdio_handler_t *input_handler; - qdio_handler_t *output_handler; int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); int (*recover)(void *ptr); int (*setup) (struct ccwgroup_device *); @@ -780,9 +778,9 @@ struct qeth_card { struct qeth_card_options options; wait_queue_head_t wait_q; - spinlock_t vlanlock; spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct mutex vid_list_mutex; /* vid_list */ struct list_head vid_list; DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); @@ -867,6 +865,32 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } +static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, + u8 flags) +{ + if ((card->dev->features & NETIF_F_RXCSUM) && + (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (card->options.performance_stats) + card->perf_stats.rx_csum++; + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + +static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) +{ + *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; + if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) || + (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) + *flags |= QETH_HDR_EXT_UDP; + if (ipv == 4) { + /* some HW requires combined L3+L4 csum offload: */ + *flags |= QETH_HDR_EXT_CSUM_HDR_REQ; + ip_hdr(skb)->check = 0; + } +} + static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, struct qeth_buffer_pool_entry *entry) { @@ -879,6 +903,27 @@ static inline int qeth_is_diagass_supported(struct qeth_card *card, return card->info.diagass_support & (__u32)cmd; } +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data, + enum qeth_prot_versions prot); +/* IPv4 variant */ +static inline int qeth_send_simple_setassparms(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV4); +} + +static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV6); +} + extern struct 
qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; extern const struct attribute_group *qeth_generic_attr_groups[]; @@ -921,13 +966,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); -void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long); int qeth_poll(struct napi_struct *napi, int budget); -void qeth_qdio_input_handler(struct ccw_device *, - unsigned int, unsigned int, int, - int, unsigned long); -void qeth_qdio_output_handler(struct ccw_device *, unsigned int, - int, int, int, unsigned long); void qeth_clear_ipacmd_list(struct qeth_card *); int qeth_qdio_clear_card(struct qeth_card *, int); void qeth_clear_working_pool_list(struct qeth_card *); @@ -979,8 +1018,6 @@ int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); void qeth_trace_features(struct qeth_card *); void qeth_close_dev(struct qeth_card *); -int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs, - __u16, long); int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, long, int (*reply_cb)(struct qeth_card *, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index f5a0d894d9ad..9f28b6f2efc4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1467,13 +1467,13 @@ static int qeth_setup_card(struct qeth_card *card) card->lan_online = 0; card->read_or_write_problem = 0; card->dev = NULL; - spin_lock_init(&card->vlanlock); spin_lock_init(&card->mclock); spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); + mutex_init(&card->vid_list_mutex); card->thread_start_mask = 0; card->thread_allowed_mask = 0; card->thread_running_mask = 0; @@ -3588,15 +3588,14 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) } } -void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, - unsigned long card_ptr) +static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, + unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *)card_ptr; if (card->dev && (card->dev->flags & IFF_UP)) napi_schedule(&card->napi); } -EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) { @@ -3698,9 +3697,10 @@ out: return; } -void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, - unsigned int queue, int first_elem, int count, - unsigned long card_ptr) +static void qeth_qdio_input_handler(struct ccw_device *ccwdev, + unsigned int qdio_err, int queue, + int first_elem, int count, + unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *)card_ptr; @@ -3711,14 +3711,12 @@ void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); else if (qdio_err) qeth_schedule_recovery(card); - - } -EXPORT_SYMBOL_GPL(qeth_qdio_input_handler); -void qeth_qdio_output_handler(struct ccw_device *ccwdev, - unsigned int qdio_error, int __queue, int first_element, - int count, unsigned long card_ptr) +static void qeth_qdio_output_handler(struct ccw_device *ccwdev, + unsigned int qdio_error, int __queue, + int first_element, int count, + unsigned long card_ptr) { struct 
qeth_card *card = (struct qeth_card *) card_ptr; struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; @@ -3787,7 +3785,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, card->perf_stats.outbound_handler_time += qeth_get_micros() - card->perf_stats.outbound_handler_start_time; } -EXPORT_SYMBOL_GPL(qeth_qdio_output_handler); /* We cannot use outbound queue 3 for unicast packets on HiperSockets */ static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) @@ -4995,7 +4992,7 @@ static int qeth_qdio_establish(struct qeth_card *card) goto out_free_in_sbals; } for (i = 0; i < card->qdio.no_in_queues; ++i) - queue_start_poll[i] = card->discipline->start_poll; + queue_start_poll[i] = qeth_qdio_start_poll; qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); @@ -5019,8 +5016,8 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.qib_param_field = qib_param_field; init_data.no_input_qs = card->qdio.no_in_queues; init_data.no_output_qs = card->qdio.no_out_queues; - init_data.input_handler = card->discipline->input_handler; - init_data.output_handler = card->discipline->output_handler; + init_data.input_handler = qeth_qdio_input_handler; + init_data.output_handler = qeth_qdio_output_handler; init_data.queue_start_poll_array = queue_start_poll; init_data.int_parm = (unsigned long) card; init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; @@ -5204,6 +5201,11 @@ retriable: rc = qeth_query_ipassists(card, QETH_PROT_IPV4); if (rc == -ENOMEM) goto out; + if (qeth_is_supported(card, IPA_IPV6)) { + rc = qeth_query_ipassists(card, QETH_PROT_IPV6); + if (rc == -ENOMEM) + goto out; + } if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { rc = qeth_query_setadapterparms(card); if (rc < 0) { @@ -5511,26 +5513,26 @@ int qeth_send_setassparms(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_send_setassparms); -int qeth_send_simple_setassparms(struct qeth_card *card, - enum qeth_ipa_funcs ipa_func, - __u16 cmd_code, long data) +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data, + enum qeth_prot_versions prot) { int rc; int length = 0; struct qeth_cmd_buffer *iob; - QETH_CARD_TEXT(card, 4, "simassp4"); + QETH_CARD_TEXT_(card, 4, "simassp%i", prot); if (data) length = sizeof(__u32); - iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, - length, QETH_PROT_IPV4); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); if (!iob) return -ENOMEM; rc = qeth_send_setassparms(card, iob, length, data, qeth_setassparms_cb, NULL); return rc; } -EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms); +EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); static void qeth_unregister_dbf_views(void) { @@ -6008,7 +6010,8 @@ static struct { {"tx lin"}, {"tx linfail"}, {"cq handler count"}, - {"cq handler time"} + {"cq handler time"}, + {"rx csum"} }; int qeth_core_get_sset_count(struct net_device *dev, int stringset) @@ -6070,6 +6073,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev, data[35] = card->perf_stats.tx_linfail; data[36] = card->perf_stats.cq_cnt; data[37] = card->perf_stats.cq_time; + data[38] = card->perf_stats.rx_csum; } EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); @@ -6326,14 +6330,15 @@ static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code, long data, - struct qeth_checksum_cmd *chksum_cb) + struct qeth_checksum_cmd *chksum_cb, + enum 
qeth_prot_versions prot) { struct qeth_cmd_buffer *iob; int rc = -ENOMEM; QETH_CARD_TEXT(card, 4, "chkdocmd"); iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, - sizeof(__u32), QETH_PROT_IPV4); + sizeof(__u32), prot); if (iob) rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, qeth_ipa_checksum_run_cmd_cb, @@ -6341,16 +6346,17 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, return rc; } -static int qeth_send_checksum_on(struct qeth_card *card, int cstype) +static int qeth_send_checksum_on(struct qeth_card *card, int cstype, + enum qeth_prot_versions prot) { - const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR | - QETH_IPA_CHECKSUM_UDP | - QETH_IPA_CHECKSUM_TCP; + u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; struct qeth_checksum_cmd chksum_cb; int rc; + if (prot == QETH_PROT_IPV4) + required_features |= QETH_IPA_CHECKSUM_IP_HDR; rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, - &chksum_cb); + &chksum_cb, prot); if (!rc) { if ((required_features & chksum_cb.supported) != required_features) @@ -6362,37 +6368,42 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype) QETH_CARD_IFNAME(card)); } if (rc) { - qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); + qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); dev_warn(&card->gdev->dev, - "Starting HW checksumming for %s failed, using SW checksumming\n", - QETH_CARD_IFNAME(card)); + "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n", + prot, QETH_CARD_IFNAME(card)); return rc; } rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, - chksum_cb.supported, &chksum_cb); + chksum_cb.supported, &chksum_cb, + prot); if (!rc) { if ((required_features & chksum_cb.enabled) != required_features) rc = -EIO; } if (rc) { - qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0); + qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); dev_warn(&card->gdev->dev, - "Enabling HW checksumming for %s failed, using SW checksumming\n", - QETH_CARD_IFNAME(card)); + "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n", + prot, QETH_CARD_IFNAME(card)); return rc; } - dev_info(&card->gdev->dev, "HW Checksumming (%sbound) enabled\n", - cstype == IPA_INBOUND_CHECKSUM ? "in" : "out"); + dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", + cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); return 0; } -static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype) +static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, + enum qeth_prot_versions prot) { - int rc = (on) ? qeth_send_checksum_on(card, cstype) - : qeth_send_simple_setassparms(card, cstype, - IPA_CMD_ASS_STOP, 0); + int rc = (on) ? qeth_send_checksum_on(card, cstype, prot) + : qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, + prot); return rc ? -EIO : 0; } @@ -6419,8 +6430,31 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on) return rc; } -#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO) +static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) +{ + int rc_ipv4 = (on) ? 
-EOPNOTSUPP : 0; + int rc_ipv6; + + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV4); + if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) + /* no/one Offload Assist available, so the rc is trivial */ + return rc_ipv4; + rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV6); + + if (on) + /* enable: success if any Assist is active */ + return (rc_ipv6) ? rc_ipv4 : 0; + + /* disable: failure if any Assist is still active */ + return (rc_ipv6) ? rc_ipv6 : rc_ipv4; +} + +#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ + NETIF_F_IPV6_CSUM) /** * qeth_recover_features() - Restore device features after recovery * @dev: the recovering net_device @@ -6455,16 +6489,19 @@ int qeth_set_features(struct net_device *dev, netdev_features_t features) QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); if ((changed & NETIF_F_IP_CSUM)) { - rc = qeth_set_ipa_csum(card, - features & NETIF_F_IP_CSUM ? 1 : 0, - IPA_OUTBOUND_CHECKSUM); + rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4); if (rc) changed ^= NETIF_F_IP_CSUM; } - if ((changed & NETIF_F_RXCSUM)) { - rc = qeth_set_ipa_csum(card, - features & NETIF_F_RXCSUM ? 1 : 0, - IPA_INBOUND_CHECKSUM); + if (changed & NETIF_F_IPV6_CSUM) { + rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6); + if (rc) + changed ^= NETIF_F_IPV6_CSUM; + } + if (changed & NETIF_F_RXCSUM) { + rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); if (rc) changed ^= NETIF_F_RXCSUM; } @@ -6491,7 +6528,10 @@ netdev_features_t qeth_fix_features(struct net_device *dev, QETH_DBF_TEXT(SETUP, 2, "fixfeat"); if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) features &= ~NETIF_F_IP_CSUM; - if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) + features &= ~NETIF_F_IPV6_CSUM; + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && + !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) features &= ~NETIF_F_RXCSUM; if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) features &= ~NETIF_F_TSO; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index f4d1ec0b8f5a..878e62f35169 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -246,6 +246,8 @@ enum qeth_ipa_funcs { IPA_QUERY_ARP_ASSIST = 0x00040000L, IPA_INBOUND_TSO = 0x00080000L, IPA_OUTBOUND_TSO = 0x00100000L, + IPA_INBOUND_CHECKSUM_V6 = 0x00400000L, + IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L, }; /* SETIP/DELIP IPA Command: ***************************************************/ diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index ae81534de912..c3f18afb368b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -144,6 +144,8 @@ static ssize_t qeth_dev_portno_store(struct device *dev, goto out; } card->info.portno = portno; + if (card->dev) + card->dev->dev_port = portno; out: mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index b8079f2a65b3..a7cb37da6a21 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/etherdevice.h> -#include <linux/ip.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/hashtable.h> @@ -195,23 +194,6 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return RTN_UNSPEC; } -static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb) -{ - struct iphdr *iph = ip_hdr(skb); - - /* tcph->check contains already the pseudo hdr checksum - * so just set the header flags - */ - if (iph->protocol == IPPROTO_UDP) - hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_UDP; - hdr->hdr.l2.flags[1] |= QETH_HDR_EXT_CSUM_TRANSP_REQ | - QETH_HDR_EXT_CSUM_HDR_REQ; - iph->check = 0; - if (card->options.performance_stats) - card->perf_stats.tx_csum++; -} - static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, int cast_type, unsigned int data_len) { @@ -297,12 +279,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, static void qeth_l2_process_vlans(struct qeth_card *card) { struct qeth_vlan_vid *id; + QETH_CARD_TEXT(card, 3, "L2prcvln"); - spin_lock_bh(&card->vlanlock); + mutex_lock(&card->vid_list_mutex); list_for_each_entry(id, &card->vid_list, list) { qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); } - spin_unlock_bh(&card->vlanlock); + mutex_unlock(&card->vid_list_mutex); } static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, @@ -319,7 +302,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, QETH_CARD_TEXT(card, 3, "aidREC"); return 0; } - id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC); + id = kmalloc(sizeof(*id), GFP_KERNEL); if (id) { id->vid = vid; rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); @@ -327,9 +310,9 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, kfree(id); return rc; } - spin_lock_bh(&card->vlanlock); + mutex_lock(&card->vid_list_mutex); list_add_tail(&id->list, &card->vid_list); - spin_unlock_bh(&card->vlanlock); + mutex_unlock(&card->vid_list_mutex); } else { return -ENOMEM; } @@ -348,7 +331,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, QETH_CARD_TEXT(card, 3, "kidREC"); return 0; } - spin_lock_bh(&card->vlanlock); + mutex_lock(&card->vid_list_mutex); list_for_each_entry(id, &card->vid_list, list) { if (id->vid == vid) { list_del(&id->list); @@ -356,7 +339,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, break; } } - spin_unlock_bh(&card->vlanlock); + mutex_unlock(&card->vid_list_mutex); if (tmpid) { rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); kfree(tmpid); @@ -423,15 +406,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, switch (hdr->hdr.l2.id) { case QETH_HEADER_TYPE_LAYER2: skb->protocol = eth_type_trans(skb, skb->dev); - if ((card->dev->features & NETIF_F_RXCSUM) - && ((hdr->hdr.l2.flags[1] & - (QETH_HDR_EXT_CSUM_HDR_REQ | - QETH_HDR_EXT_CSUM_TRANSP_REQ)) == - (QETH_HDR_EXT_CSUM_HDR_REQ | - QETH_HDR_EXT_CSUM_TRANSP_REQ))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; + qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]); if (skb->protocol == htons(ETH_P_802_2)) *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; len = skb->len; @@ -464,7 +439,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, static int 
qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; - char vendor_pre[] = {0x02, 0x00, 0x00}; QETH_DBF_TEXT(SETUP, 2, "l2reqmac"); QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); @@ -484,16 +458,20 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) card->info.type == QETH_CARD_TYPE_OSX || card->info.guestlan) { rc = qeth_setadpparms_change_macaddr(card); - if (rc) { - QETH_DBF_MESSAGE(2, "couldn't get MAC address on " - "device %s: x%x\n", CARD_BUS_ID(card), rc); - QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); - return rc; - } - } else { - eth_random_addr(card->dev->dev_addr); - memcpy(card->dev->dev_addr, vendor_pre, 3); + if (!rc) + goto out; + QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n", + CARD_BUS_ID(card), rc); + QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); + /* fall back once more: */ } + + /* some devices don't support a custom MAC address: */ + if (card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) + return (rc) ? rc : -EADDRNOTAVAIL; + eth_hw_addr_random(card->dev); + out: QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len); return 0; @@ -685,7 +663,8 @@ out: } static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, - struct qeth_qdio_out_q *queue, int cast_type) + struct qeth_qdio_out_q *queue, int cast_type, + int ipv) { int push_len = sizeof(struct qeth_hdr); unsigned int elements, nr_frags; @@ -723,8 +702,11 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, hdr_elements = 1; } qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); - if (skb->ip_summed == CHECKSUM_PARTIAL) - qeth_l2_hdr_csum(card, hdr, skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } elements = qeth_get_elements_no(card, skb, hdr_elements, 0); if (!elements) { @@ -776,6 +758,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, { struct qeth_card *card = dev->ml_priv; int cast_type = qeth_l2_get_cast_type(card, skb); + int ipv = qeth_get_ip_version(skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; int rc; @@ -783,7 +766,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (card->qdio.do_prio_queueing || (cast_type && card->info.is_multicast_different)) queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, - qeth_get_ip_version(skb), cast_type)]; + ipv, cast_type)]; else queue = card->qdio.out_qs[card->qdio.default_out_queue]; @@ -806,7 +789,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); break; default: - rc = qeth_l2_xmit_osa(card, skb, queue, cast_type); + rc = qeth_l2_xmit_osa(card, skb, queue, cast_type, ipv); } if (!rc) { @@ -983,6 +966,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->mtu = card->info.initial_mtu; card->dev->min_mtu = 64; card->dev->max_mtu = ETH_MAX_MTU; + card->dev->dev_port = card->info.portno; card->dev->netdev_ops = &qeth_l2_netdev_ops; if (card->info.type == QETH_CARD_TYPE_OSN) { card->dev->ethtool_ops = &qeth_l2_osn_ops; @@ -1011,10 +995,15 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->hw_features |= NETIF_F_IP_CSUM; card->dev->vlan_features |= NETIF_F_IP_CSUM; } - if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { - card->dev->hw_features |= NETIF_F_RXCSUM; - card->dev->vlan_features |= NETIF_F_RXCSUM; - } + } + if (qeth_is_supported6(card, 
IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; + } + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) || + qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_RXCSUM; + card->dev->vlan_features |= NETIF_F_RXCSUM; } card->info.broadcast_capable = 1; @@ -1315,9 +1304,6 @@ static int qeth_l2_control_event(struct qeth_card *card, struct qeth_discipline qeth_l2_discipline = { .devtype = &qeth_l2_devtype, - .start_poll = qeth_qdio_start_poll, - .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, - .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, .process_rx_buffer = qeth_l2_process_inbound_buffer, .recover = qeth_l2_recover, .setup = qeth_l2_probe_device, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index c1a16a74aa83..e7fa479adf47 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -735,22 +735,6 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) return rc; } -static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card, - enum qeth_ipa_funcs ipa_func, __u16 cmd_code) -{ - int rc; - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 4, "simassp6"); - iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, - 0, QETH_PROT_IPV6); - if (!iob) - return -ENOMEM; - rc = qeth_send_setassparms(card, iob, 0, 0, - qeth_setassparms_cb, NULL); - return rc; -} - static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) { int rc; @@ -851,14 +835,6 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) QETH_CARD_TEXT(card, 3, "softipv6"); - rc = qeth_query_ipassists(card, QETH_PROT_IPV6); - if (rc) { - dev_err(&card->gdev->dev, - "Activating IPv6 support for %s failed\n", - QETH_CARD_IFNAME(card)); - return rc; - } - if (card->info.type == QETH_CARD_TYPE_IQD) goto out; @@ -870,16 +846,16 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) QETH_CARD_IFNAME(card)); return rc; } - rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, - IPA_CMD_ASS_START); + rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, + IPA_CMD_ASS_START, 0); if (rc) { dev_err(&card->gdev->dev, "Activating IPv6 support for %s failed\n", QETH_CARD_IFNAME(card)); return rc; } - rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, - IPA_CMD_ASS_START); + rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, + IPA_CMD_ASS_START, 0); if (rc) { dev_warn(&card->gdev->dev, "Enabling the passthrough mode for %s failed\n", @@ -1293,91 +1269,6 @@ static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) in6_dev_put(in6_dev); } -static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, - unsigned short vid) -{ - struct in_device *in_dev; - struct in_ifaddr *ifa; - struct qeth_ipaddr *addr; - struct net_device *netdev; - - QETH_CARD_TEXT(card, 4, "frvaddr4"); - - netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); - if (!netdev) - return; - in_dev = in_dev_get(netdev); - if (!in_dev) - return; - - addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); - if (!addr) - goto out; - - spin_lock_bh(&card->ip_lock); - - for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { - addr->u.a4.addr = be32_to_cpu(ifa->ifa_address); - addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask); - addr->type = QETH_IP_TYPE_NORMAL; - qeth_l3_delete_ip(card, addr); - } - - spin_unlock_bh(&card->ip_lock); - - kfree(addr); -out: - in_dev_put(in_dev); -} - -static void 
qeth_l3_free_vlan_addresses6(struct qeth_card *card, - unsigned short vid) -{ - struct inet6_dev *in6_dev; - struct inet6_ifaddr *ifa; - struct qeth_ipaddr *addr; - struct net_device *netdev; - - QETH_CARD_TEXT(card, 4, "frvaddr6"); - - netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), vid); - if (!netdev) - return; - - in6_dev = in6_dev_get(netdev); - if (!in6_dev) - return; - - addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); - if (!addr) - goto out; - - spin_lock_bh(&card->ip_lock); - - list_for_each_entry(ifa, &in6_dev->addr_list, if_list) { - memcpy(&addr->u.a6.addr, &ifa->addr, - sizeof(struct in6_addr)); - addr->u.a6.pfxlen = ifa->prefix_len; - addr->type = QETH_IP_TYPE_NORMAL; - qeth_l3_delete_ip(card, addr); - } - - spin_unlock_bh(&card->ip_lock); - - kfree(addr); -out: - in6_dev_put(in6_dev); -} - -static void qeth_l3_free_vlan_addresses(struct qeth_card *card, - unsigned short vid) -{ - rcu_read_lock(); - qeth_l3_free_vlan_addresses4(card, vid); - qeth_l3_free_vlan_addresses6(card, vid); - rcu_read_unlock(); -} - static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { @@ -1398,8 +1289,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, QETH_CARD_TEXT(card, 3, "kidREC"); return 0; } - /* unregister IP addresses of vlan device */ - qeth_l3_free_vlan_addresses(card, vid); clear_bit(vid, card->active_vlans); qeth_l3_set_rx_mode(dev); return 0; @@ -1454,17 +1343,7 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); } - if (card->dev->features & NETIF_F_RXCSUM) { - if ((hdr->hdr.l3.ext_flags & - (QETH_HDR_EXT_CSUM_HDR_REQ | - QETH_HDR_EXT_CSUM_TRANSP_REQ)) == - (QETH_HDR_EXT_CSUM_HDR_REQ | - QETH_HDR_EXT_CSUM_TRANSP_REQ)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; - } else - skb->ip_summed = CHECKSUM_NONE; + qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); } static int qeth_l3_process_inbound_buffer(struct qeth_card *card, @@ -2210,23 +2089,6 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb) -{ - struct iphdr *iph = ip_hdr(skb); - - /* tcph->check contains already the pseudo hdr checksum - * so just set the header flags - */ - if (iph->protocol == IPPROTO_UDP) - hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; - hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ | - QETH_HDR_EXT_CSUM_HDR_REQ; - iph->check = 0; - if (card->options.performance_stats) - card->perf_stats.tx_csum++; -} - static void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr, struct sk_buff *skb) { @@ -2418,8 +2280,11 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } } - if (skb->ip_summed == CHECKSUM_PARTIAL) - qeth_l3_hdr_csum(card, hdr, new_skb); + if (new_skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(new_skb, &hdr->hdr.l3.ext_flags, ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } } elements = use_tso ? 
@@ -2620,28 +2485,32 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) (card->info.link_type == QETH_LINK_TYPE_HSTR)) { pr_info("qeth_l3: ignoring TR device\n"); return -ENODEV; - } else { - card->dev = alloc_etherdev(0); - if (!card->dev) - return -ENODEV; - card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; - - /*IPv6 address autoconfiguration stuff*/ - qeth_l3_get_unique_id(card); - if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) - card->dev->dev_id = card->info.unique_id & - 0xffff; - - card->dev->hw_features |= NETIF_F_SG; - card->dev->vlan_features |= NETIF_F_SG; - - if (!card->info.guestlan) { - card->dev->features |= NETIF_F_SG; - card->dev->hw_features |= NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_IP_CSUM; - card->dev->vlan_features |= NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_IP_CSUM; - } + } + + card->dev = alloc_etherdev(0); + if (!card->dev) + return -ENODEV; + card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; + + /*IPv6 address autoconfiguration stuff*/ + qeth_l3_get_unique_id(card); + if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) + card->dev->dev_id = card->info.unique_id & 0xffff; + + card->dev->hw_features |= NETIF_F_SG; + card->dev->vlan_features |= NETIF_F_SG; + + if (!card->info.guestlan) { + card->dev->features |= NETIF_F_SG; + card->dev->hw_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + } + + if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; } } else if (card->info.type == QETH_CARD_TYPE_IQD) { card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, @@ -2663,6 +2532,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) card->dev->mtu = card->info.initial_mtu; card->dev->min_mtu = 64; card->dev->max_mtu = ETH_MAX_MTU; + card->dev->dev_port = card->info.portno; card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | @@ -2960,9 +2830,6 @@ static int qeth_l3_control_event(struct qeth_card *card, struct qeth_discipline qeth_l3_discipline = { .devtype = &qeth_l3_devtype, - .start_poll = qeth_qdio_start_poll, - .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, - .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, .process_rx_buffer = qeth_l3_process_inbound_buffer, .recover = qeth_l3_recover, .setup = qeth_l3_probe_device, diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 96bbb82c826d..a10cf25ee7f9 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1500,8 +1500,8 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) CAP16_TO_CAP32(FC_RX); CAP16_TO_CAP32(FC_TX); CAP16_TO_CAP32(ANEG); - CAP16_TO_CAP32(MDIX); CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(MDISTRAIGHT); CAP16_TO_CAP32(FEC_RS); CAP16_TO_CAP32(FEC_BASER_RS); CAP16_TO_CAP32(802_3_PAUSE); diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index c105a2e48ac1..cabb6af60fb8 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -383,11 +383,16 @@ struct qedf_ctx { u32 flogi_failed; /* Used for fc statistics */ + struct mutex stats_mutex; u64 input_requests; u64 output_requests; u64 control_requests; u64 packet_aborts; u64 alloc_failures; + u8 lun_resets; + u8 target_resets; + u8 task_set_fulls; + u8 busy; }; struct io_bdt { @@ -496,7 +501,9 @@ extern int qedf_post_io_req(struct qedf_rport *fcport, extern void 
qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req); extern int qedf_send_flogi(struct qedf_ctx *qedf); +extern void qedf_get_protocol_tlv_data(void *dev, void *data); extern void qedf_fp_io_handler(struct work_struct *work); +extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data); #define FCOE_WORD_TO_BYTE 4 #define QEDF_MAX_TASK_NUM 0xFFFF diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index c539a7ae3a7e..5789ce185923 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -439,7 +439,6 @@ qedf_dbg_offload_stats_open(struct inode *inode, struct file *file) return single_open(file, qedf_offload_stats_show, qedf); } - const struct file_operations qedf_dbg_fops[] = { qedf_dbg_fileops(qedf, fp_int), qedf_dbg_fileops_seq(qedf, io_trace), diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index 773558fc0697..16d1a21cdff9 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -23,6 +23,7 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) struct fip_vlan *vlan; #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; + unsigned long flags = 0; skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) @@ -65,7 +66,9 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) kfree_skb(skb); return; } - qed_ops->ll2->start_xmit(qedf->cdev, skb); + + set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); + qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); } static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, @@ -139,7 +142,7 @@ void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); } /* Process incoming FIP frames. 
*/ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 50a50c4249d0..3fe579d0f1a8 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1200,6 +1200,12 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, fcport->retry_delay_timestamp = jiffies + (qualifier * HZ / 10); } + /* Record stats */ + if (io_req->cdb_status == + SAM_STAT_TASK_SET_FULL) + qedf->task_set_fulls++; + else + qedf->busy++; } } if (io_req->fcp_resid) @@ -1866,6 +1872,11 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, goto reset_tmf_err; } + if (tm_flags == FCP_TMF_LUN_RESET) + qedf->lun_resets++; + else if (tm_flags == FCP_TMF_TGT_RESET) + qedf->target_resets++; + /* Initialize rest of io_req fields */ io_req->sc_cmd = sc_cmd; io_req->fcport = fcport; diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 284ccb566b19..d3f73d8d7738 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -566,6 +566,8 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = { { .link_update = qedf_link_update, .dcbx_aen = qedf_dcbx_handler, + .get_generic_tlv_data = qedf_get_generic_tlv_data, + .get_protocol_tlv_data = qedf_get_protocol_tlv_data, } }; @@ -994,7 +996,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) if (qedf_dump_frames) print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb->len, false); - qed_ops->ll2->start_xmit(qedf->cdev, skb); + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); return 0; } @@ -1746,6 +1748,8 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host goto out; } + mutex_lock(&qedf->stats_mutex); + /* Query firmware for offload stats */ qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); @@ -1779,6 +1783,7 @@ static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host qedf_stats->fcp_packet_aborts += qedf->packet_aborts; qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; + mutex_unlock(&qedf->stats_mutex); kfree(fw_fcoe_stats); out: return qedf_stats; @@ -2948,6 +2953,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) qedf->stop_io_on_error = false; pci_set_drvdata(pdev, qedf); init_completion(&qedf->fipvlan_compl); + mutex_init(&qedf->stats_mutex); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "QLogic FastLinQ FCoE Module qedf %s, " @@ -3393,6 +3399,104 @@ static void qedf_remove(struct pci_dev *pdev) } /* + * Protocol TLV handler + */ +void qedf_get_protocol_tlv_data(void *dev, void *data) +{ + struct qedf_ctx *qedf = dev; + struct qed_mfw_tlv_fcoe *fcoe = data; + struct fc_lport *lport = qedf->lport; + struct Scsi_Host *host = lport->host; + struct fc_host_attrs *fc_host = shost_to_fc_host(host); + struct fc_host_statistics *hst; + + /* Force a refresh of the fc_host stats including offload stats */ + hst = qedf_fc_get_host_stats(host); + + fcoe->qos_pri_set = true; + fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ + + fcoe->ra_tov_set = true; + fcoe->ra_tov = lport->r_a_tov; + + fcoe->ed_tov_set = true; + fcoe->ed_tov = lport->e_d_tov; + + fcoe->npiv_state_set = true; + fcoe->npiv_state = 1; /* NPIV always enabled */ + + fcoe->num_npiv_ids_set = true; + fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; + + /* Certain attributes we only want to set if we've selected an FCF */ + if (qedf->ctlr.sel_fcf) { + fcoe->switch_name_set = true; + u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); + } + + fcoe->port_state_set = true; + /* For qedf we're either 
link down or fabric attach */ + if (lport->link_up) + fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC; + else + fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE; + + fcoe->link_failures_set = true; + fcoe->link_failures = (u16)hst->link_failure_count; + + fcoe->fcoe_txq_depth_set = true; + fcoe->fcoe_rxq_depth_set = true; + fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS; + fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS; + + fcoe->fcoe_rx_frames_set = true; + fcoe->fcoe_rx_frames = hst->rx_frames; + + fcoe->fcoe_tx_frames_set = true; + fcoe->fcoe_tx_frames = hst->tx_frames; + + fcoe->fcoe_rx_bytes_set = true; + fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000; + + fcoe->fcoe_tx_bytes_set = true; + fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000; + + fcoe->crc_count_set = true; + fcoe->crc_count = hst->invalid_crc_count; + + fcoe->tx_abts_set = true; + fcoe->tx_abts = hst->fcp_packet_aborts; + + fcoe->tx_lun_rst_set = true; + fcoe->tx_lun_rst = qedf->lun_resets; + + fcoe->abort_task_sets_set = true; + fcoe->abort_task_sets = qedf->packet_aborts; + + fcoe->scsi_busy_set = true; + fcoe->scsi_busy = qedf->busy; + + fcoe->scsi_tsk_full_set = true; + fcoe->scsi_tsk_full = qedf->task_set_fulls; +} + +/* Generic TLV data callback */ +void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qedf_ctx *qedf; + + if (!dev) { + QEDF_INFO(NULL, QEDF_LOG_EVT, + "dev is NULL so ignoring get_generic_tlv_data request.\n"); + return; + } + qedf = (struct qedf_ctx *)dev; + + memset(data, 0, sizeof(struct qed_generic_tlvs)); + ether_addr_copy(data->mac[0], qedf->mac); +} + +/* * Module Init/Remove */ diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index b8b22ce60ecc..fc3babc15fa3 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h @@ -353,6 +353,9 @@ struct qedi_ctx { #define IPV6_LEN 41 #define IPV4_LEN 17 struct iscsi_boot_kset *boot_kset; + + /* Used for iscsi statistics */ + struct mutex stats_lock; }; struct qedi_work { diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 7ec7f6e00fb8..2f0a4f2c5ff8 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) else hdrs += IPV4_HDR_LEN; - if (vlan_en) - hdrs += VLAN_LEN; - mss = pmtu - hdrs; - if (tcp_ts_en) - mss -= TCP_OPTION_LEN; - if (!mss) mss = DEF_MSS; @@ -1150,7 +1144,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) if (vlanid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); - rc = qedi_ops->ll2->start_xmit(cdev, skb); + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", rc); diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index ea1315189922..11260776212f 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -223,6 +223,12 @@ struct qedi_work_map { struct work_struct *ptr_tmf_work; }; +struct qedi_boot_target { + char ip_addr[64]; + char iscsi_name[255]; + u32 ipv6_en; +}; + #define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16))) #define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16) diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 4da3592aec0f..32ee7f62fef9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -55,6 +55,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi); static struct 
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index b8b22ce60ecc..fc3babc15fa3 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h @@ -353,6 +353,9 @@ struct qedi_ctx { #define IPV6_LEN 41 #define IPV4_LEN 17 struct iscsi_boot_kset *boot_kset; + + /* Used for iscsi statistics */ + struct mutex stats_lock; }; struct qedi_work {
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 7ec7f6e00fb8..2f0a4f2c5ff8 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en) else hdrs += IPV4_HDR_LEN; - if (vlan_en) - hdrs += VLAN_LEN; - mss = pmtu - hdrs; - if (tcp_ts_en) - mss -= TCP_OPTION_LEN; - if (!mss) mss = DEF_MSS; @@ -1150,7 +1144,7 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid) if (vlanid) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); - rc = qedi_ops->ll2->start_xmit(cdev, skb); + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); if (rc) { QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", rc);
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index ea1315189922..11260776212f 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -223,6 +223,12 @@ struct qedi_work_map { struct work_struct *ptr_tmf_work; }; +struct qedi_boot_target { + char ip_addr[64]; + char iscsi_name[255]; + u32 ipv6_en; +}; + #define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16))) #define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
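With the VLAN and TCP-timestamp adjustments dropped, qedi_calc_mss() reduces to PMTU minus fixed TCP/IP header overhead. A standalone sketch of the resulting arithmetic, assuming the usual protocol header lengths (DEF_MSS stands in for the driver's fallback value):

#include <stdint.h>

#define TCP_HDR_LEN  20
#define IPV4_HDR_LEN 20
#define IPV6_HDR_LEN 40
#define DEF_MSS      1460	/* stand-in for the driver's fallback MSS */

static uint16_t calc_mss(uint16_t pmtu, int is_ipv6)
{
	uint16_t hdrs = TCP_HDR_LEN + (is_ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN);
	uint16_t mss = pmtu - hdrs;

	/* As in the hunk: a degenerate PMTU falls back to the default */
	if (!mss)
		mss = DEF_MSS;
	return mss;
}

With a 1500-byte PMTU this yields an MSS of 1460 over IPv4 and 1440 over IPv6.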
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 4da3592aec0f..32ee7f62fef9 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -55,6 +55,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi); static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid); static void qedi_reset_uio_rings(struct qedi_uio_dev *udev); static void qedi_ll2_free_skbs(struct qedi_ctx *qedi); +static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { @@ -879,6 +880,201 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi) kfree(qedi->global_queues); } +static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block, + struct qedi_boot_target *tgt, u8 index) +{ + u32 ipv6_en; + + ipv6_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED); + + snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", + block->target[index].target_name.byte); + + tgt->ipv6_en = ipv6_en; + + if (ipv6_en) + snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n", + block->target[index].ipv6_addr.byte); + else + snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n", + block->target[index].ipv4_addr.byte); +} + +static int qedi_find_boot_info(struct qedi_ctx *qedi, + struct qed_mfw_tlv_iscsi *iscsi, + struct nvm_iscsi_block *block) +{ + struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL; + u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0; + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct qedi_conn *qedi_conn; + struct iscsi_session *sess; + struct iscsi_conn *conn; + char ep_ip_addr[64]; + int i, ret = 0; + + pri_ctrl_flags = !!(block->target[0].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED); + if (pri_ctrl_flags) { + pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL); + if (!pri_tgt) + return -1; + qedi_get_boot_tgt_info(block, pri_tgt, 0); + } + + sec_ctrl_flags = !!(block->target[1].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED); + if (sec_ctrl_flags) { + sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL); + if (!sec_tgt) { + ret = -1; + goto free_tgt; + } + qedi_get_boot_tgt_info(block, sec_tgt, 1); + } + + for (i = 0; i < qedi->max_active_conns; i++) { + qedi_conn = qedi_get_conn_from_id(qedi, i); + if (!qedi_conn) + continue; + + if (qedi_conn->ep->ip_type == TCP_IPV4) + snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n", + qedi_conn->ep->dst_addr); + else + snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n", + qedi_conn->ep->dst_addr); + + cls_conn = qedi_conn->cls_conn; + conn = cls_conn->dd_data; + cls_sess = iscsi_conn_to_session(cls_conn); + sess = cls_sess->dd_data; + + if (pri_ctrl_flags) { + if (!strcmp(pri_tgt->iscsi_name, sess->targetname) && + !strcmp(pri_tgt->ip_addr, ep_ip_addr)) { + found = 1; + break; + } + } + + if (sec_ctrl_flags) { + if (!strcmp(sec_tgt->iscsi_name, sess->targetname) && + !strcmp(sec_tgt->ip_addr, ep_ip_addr)) { + found = 1; + break; + } + } + } + + if (found) { + if (conn->hdrdgst_en) { + iscsi->header_digest_set = true; + iscsi->header_digest = 1; + } + + if (conn->datadgst_en) { + iscsi->data_digest_set = true; + iscsi->data_digest = 1; + } + iscsi->boot_taget_portal_set = true; + iscsi->boot_taget_portal = sess->tpgt; + + } else { + ret = -1; + } + + if (sec_ctrl_flags) + kfree(sec_tgt); +free_tgt: + if (pri_ctrl_flags) + kfree(pri_tgt); + + return ret; +} + +static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) +{ + struct qedi_ctx *qedi; + + if (!dev) { + QEDI_INFO(NULL, QEDI_LOG_EVT, + "dev is NULL so ignoring get_generic_tlv_data request.\n"); + return; + } + qedi = (struct qedi_ctx *)dev; + + memset(data, 0, sizeof(struct qed_generic_tlvs)); + ether_addr_copy(data->mac[0], qedi->mac); +} + +/* + * Protocol TLV handler + */ +static void qedi_get_protocol_tlv_data(void *dev, void *data) +{ + struct qed_mfw_tlv_iscsi *iscsi = data; + struct qed_iscsi_stats *fw_iscsi_stats; + struct nvm_iscsi_block *block = NULL; + u32 chap_en = 0, mchap_en = 0; + struct qedi_ctx *qedi = dev; + int rval = 0; + + fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL); + if (!fw_iscsi_stats) { + QEDI_ERR(&qedi->dbg_ctx, + "Could not allocate memory for fw_iscsi_stats.\n"); + goto exit_get_data; + } + + mutex_lock(&qedi->stats_lock); + /* Query firmware for offload stats */ + qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats); + mutex_unlock(&qedi->stats_lock); + + iscsi->rx_frames_set = true; + iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt; + iscsi->rx_bytes_set = true; + iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt; + iscsi->tx_frames_set = true; + iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt; + iscsi->tx_bytes_set = true; + iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt; + iscsi->frame_size_set = true; + iscsi->frame_size = qedi->ll2_mtu; + block = qedi_get_nvram_block(qedi); + if (block) { + chap_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_ENABLED); + mchap_en = !!(block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED); + + iscsi->auth_method_set = (chap_en || mchap_en) ? true : false; + iscsi->auth_method = 1; + if (chap_en) + iscsi->auth_method = 2; + if (mchap_en) + iscsi->auth_method = 3; + + iscsi->tx_desc_size_set = true; + iscsi->tx_desc_size = QEDI_SQ_SIZE; + iscsi->rx_desc_size_set = true; + iscsi->rx_desc_size = QEDI_CQ_SIZE; + + /* tpgt, hdr digest, data digest */ + rval = qedi_find_boot_info(qedi, iscsi, block); + if (rval) + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Boot target not set"); + } + + kfree(fw_iscsi_stats); +exit_get_data: + return; +} + static void qedi_link_update(void *dev, struct qed_link_output *link) { struct qedi_ctx *qedi = (struct qedi_ctx *)dev; @@ -896,6 +1092,8 @@ static void qedi_link_update(void *dev, struct qed_link_output *link) static struct qed_iscsi_cb_ops qedi_cb_ops = { { .link_update = qedi_link_update, + .get_protocol_tlv_data = qedi_get_protocol_tlv_data, + .get_generic_tlv_data = qedi_get_generic_tlv_data, } };
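The auth_method encoding in qedi_get_protocol_tlv_data() is positional, and mutual CHAP wins when both NVM flags are set because it is assigned last. A reduced sketch of that mapping (the flag masks are stand-ins for the NVM_ISCSI_CFG_GEN_* bits, not their real values):

#include <stdbool.h>
#include <stdint.h>

#define CFG_CHAP_ENABLED        (1U << 0)	/* stand-in bit positions */
#define CFG_CHAP_MUTUAL_ENABLED (1U << 1)

/* Returns the TLV auth_method value; *set tells the caller whether the
 * field should be reported at all. 1 = none, 2 = CHAP, 3 = mutual CHAP. */
static uint8_t tlv_auth_method(uint32_t ctrl_flags, bool *set)
{
	bool chap  = ctrl_flags & CFG_CHAP_ENABLED;
	bool mchap = ctrl_flags & CFG_CHAP_MUTUAL_ENABLED;

	*set = chap || mchap;
	if (mchap)
		return 3;
	if (chap)
		return 2;
	return 1;
}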
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index 026182d3b27c..224d7ddeeb76 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -134,6 +134,13 @@ struct knav_dma_chan { static struct knav_dma_pool_device *kdev; +static bool device_ready; +bool knav_dma_device_ready(void) +{ + return device_ready; +} +EXPORT_SYMBOL_GPL(knav_dma_device_ready); + static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg) { if (!memcmp(&chan->cfg, cfg, sizeof(*cfg))) @@ -773,6 +780,7 @@ static int knav_dma_probe(struct platform_device *pdev) debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL, &knav_dma_debug_ops); + device_ready = true; return ret; }
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h index 905b974d1bdc..56866ba4cfc4 100644 --- a/drivers/soc/ti/knav_qmss.h +++ b/drivers/soc/ti/knav_qmss.h @@ -292,6 +292,11 @@ struct knav_queue { struct list_head list; }; +enum qmss_version { + QMSS, + QMSS_66AK2G, +}; + struct knav_device { struct device *dev; unsigned base_id; @@ -305,6 +310,7 @@ struct knav_device { struct list_head pools; struct list_head pdsps; struct list_head qmgrs; + enum qmss_version version; }; struct knav_range_ops {
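knav_dma_device_ready() — and its QMSS counterpart added below — lets dependent drivers (the Keystone NETCP Ethernet driver is the usual consumer) defer their own probe until the navigator subsystems are up. A hedged sketch of that consumer pattern; the stub flags and function bodies are illustrative only:

#include <stdbool.h>

#define EPROBE_DEFER 517	/* kernel-internal errno; defined here for the sketch */

/* Stand-ins for the flags the two probe paths set once setup succeeds */
static bool dma_ready, qmss_ready;

static bool knav_dma_device_ready(void)  { return dma_ready; }
static bool knav_qmss_device_ready(void) { return qmss_ready; }

/* A dependent probe returning -EPROBE_DEFER asks the driver core to
 * retry later, once both navigator subsystems have finished probing. */
static int consumer_probe_prologue(void)
{
	if (!knav_dma_device_ready() || !knav_qmss_device_ready())
		return -EPROBE_DEFER;
	return 0;
}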
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c index 77d6b5c03aae..419365a8d1c2 100644 --- a/drivers/soc/ti/knav_qmss_queue.c +++ b/drivers/soc/ti/knav_qmss_queue.c @@ -42,6 +42,15 @@ static DEFINE_MUTEX(knav_dev_lock); #define KNAV_QUEUE_PUSH_REG_INDEX 4 #define KNAV_QUEUE_POP_REG_INDEX 5 +/* Queue manager register indices in DTS for QMSS in K2G NAVSS. + * There are no status and vbusm push registers on this version + * of QMSS. Push registers are same as pop, So all indices above 1 + * are to be re-defined + */ +#define KNAV_L_QUEUE_CONFIG_REG_INDEX 1 +#define KNAV_L_QUEUE_REGION_REG_INDEX 2 +#define KNAV_L_QUEUE_PUSH_REG_INDEX 3 + /* PDSP register indices in DTS */ #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0 #define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1 @@ -65,6 +74,13 @@ static DEFINE_MUTEX(knav_dev_lock); */ const char *knav_acc_firmwares[] = {"ks2_qmss_pdsp_acc48.bin"}; +static bool device_ready; +bool knav_qmss_device_ready(void) +{ + return device_ready; +} +EXPORT_SYMBOL_GPL(knav_qmss_device_ready); + /** * knav_queue_notify: qmss queue notfier call * @@ -1169,8 +1185,12 @@ static int knav_queue_setup_link_ram(struct knav_device *kdev) dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n", &block->dma, block->virt, block->size); writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0); - writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0); - + if (kdev->version == QMSS_66AK2G) + writel_relaxed(block->size, + &qmgr->reg_config->link_ram_size0); + else + writel_relaxed(block->size - 1, + &qmgr->reg_config->link_ram_size0); block++; if (!block->size) continue; @@ -1387,42 +1407,64 @@ static int knav_queue_init_qmgrs(struct knav_device *kdev, qmgr->reg_peek = knav_queue_map_reg(kdev, child, KNAV_QUEUE_PEEK_REG_INDEX); - qmgr->reg_status = - knav_queue_map_reg(kdev, child, - KNAV_QUEUE_STATUS_REG_INDEX); + + if (kdev->version == QMSS) { + qmgr->reg_status = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_STATUS_REG_INDEX); + } + qmgr->reg_config = knav_queue_map_reg(kdev, child, + (kdev->version == QMSS_66AK2G) ? + KNAV_L_QUEUE_CONFIG_REG_INDEX : KNAV_QUEUE_CONFIG_REG_INDEX); qmgr->reg_region = knav_queue_map_reg(kdev, child, + (kdev->version == QMSS_66AK2G) ? + KNAV_L_QUEUE_REGION_REG_INDEX : KNAV_QUEUE_REGION_REG_INDEX); + qmgr->reg_push = knav_queue_map_reg(kdev, child, - KNAV_QUEUE_PUSH_REG_INDEX); - qmgr->reg_pop = - knav_queue_map_reg(kdev, child, - KNAV_QUEUE_POP_REG_INDEX); + (kdev->version == QMSS_66AK2G) ? + KNAV_L_QUEUE_PUSH_REG_INDEX : + KNAV_QUEUE_PUSH_REG_INDEX); + + if (kdev->version == QMSS) { + qmgr->reg_pop = + knav_queue_map_reg(kdev, child, + KNAV_QUEUE_POP_REG_INDEX); + } - if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) || + if (IS_ERR(qmgr->reg_peek) || + ((kdev->version == QMSS) && + (IS_ERR(qmgr->reg_status) || IS_ERR(qmgr->reg_pop))) || IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) || - IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) { + IS_ERR(qmgr->reg_push)) { dev_err(dev, "failed to map qmgr regs\n"); + if (kdev->version == QMSS) { + if (!IS_ERR(qmgr->reg_status)) + devm_iounmap(dev, qmgr->reg_status); + if (!IS_ERR(qmgr->reg_pop)) + devm_iounmap(dev, qmgr->reg_pop); + } if (!IS_ERR(qmgr->reg_peek)) devm_iounmap(dev, qmgr->reg_peek); - if (!IS_ERR(qmgr->reg_status)) - devm_iounmap(dev, qmgr->reg_status); if (!IS_ERR(qmgr->reg_config)) devm_iounmap(dev, qmgr->reg_config); if (!IS_ERR(qmgr->reg_region)) devm_iounmap(dev, qmgr->reg_region); if (!IS_ERR(qmgr->reg_push)) devm_iounmap(dev, qmgr->reg_push); - if (!IS_ERR(qmgr->reg_pop)) - devm_iounmap(dev, qmgr->reg_pop); devm_kfree(dev, qmgr); continue; } + /* Use same push register for pop as well */ + if (kdev->version == QMSS_66AK2G) + qmgr->reg_pop = qmgr->reg_push; + list_add_tail(&qmgr->list, &kdev->qmgrs); dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n", qmgr->start_queue, qmgr->num_queues, @@ -1681,10 +1723,24 @@ static int knav_queue_init_queues(struct knav_device *kdev) return 0; } +/* Match table for of_platform binding */ +static const struct of_device_id keystone_qmss_of_match[] = { + { + .compatible = "ti,keystone-navigator-qmss", + }, + { + .compatible = "ti,66ak2g-navss-qm", + .data = (void *)QMSS_66AK2G, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, keystone_qmss_of_match); + static int knav_queue_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *qmgrs, *queue_pools, *regions, *pdsps; + const struct of_device_id *match; struct device *dev = &pdev->dev; u32 temp[2]; int ret; @@ -1700,6 +1756,10 @@ static int knav_queue_probe(struct platform_device *pdev) return -ENOMEM; } + match = of_match_device(of_match_ptr(keystone_qmss_of_match), dev); + if (match && match->data) + kdev->version = QMSS_66AK2G; + platform_set_drvdata(pdev, kdev); kdev->dev = dev; INIT_LIST_HEAD(&kdev->queue_ranges); @@ -1796,6 +1856,7 @@ static int knav_queue_probe(struct platform_device *pdev) debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL, &knav_queue_debug_ops); + device_ready = true; return 0; err: @@ -1815,13 +1876,6 @@ static int knav_queue_remove(struct platform_device *pdev) return 0; } -/* Match table for of_platform binding */ -static struct of_device_id keystone_qmss_of_match[] = { - { .compatible = "ti,keystone-navigator-qmss", }, - {}, -}; -MODULE_DEVICE_TABLE(of, keystone_qmss_of_match); - static struct platform_driver keystone_qmss_driver = { .probe = knav_queue_probe, .remove = knav_queue_remove,
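The rewritten match table is the standard of_device_id versioning idiom: the 66AK2G binding carries the IP version in .data, so probe selects the reduced register layout without string comparisons. A minimal sketch of just that decision, with a stub match type (of_match_device itself is the kernel API; only the lookup result is modeled):

#include <stddef.h>

enum qmss_version_sketch { QMSS_SKETCH, QMSS_66AK2G_SKETCH };

struct of_device_id_stub {
	const char *compatible;
	const void *data;
};

static const struct of_device_id_stub qmss_match_stub[] = {
	{ .compatible = "ti,keystone-navigator-qmss" },	/* NULL .data => QMSS */
	{ .compatible = "ti,66ak2g-navss-qm",
	  .data = (void *)QMSS_66AK2G_SKETCH },
	{ /* sentinel */ },
};

/* Mirrors the probe logic: a non-NULL .data selects the 66AK2G layout;
 * the older binding keeps the default QMSS register indices. */
static enum qmss_version_sketch
version_from_match(const struct of_device_id_stub *match)
{
	return (match && match->data) ? QMSS_66AK2G_SKETCH : QMSS_SKETCH;
}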
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c index c723a04bc3d6..a17dd2972ccd 100644 --- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c +++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c @@ -719,6 +719,9 @@ static int port_vlans_add(struct net_device *netdev, struct ethsw_port_priv *port_priv = netdev_priv(netdev); int vid, err; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + if (switchdev_trans_ph_prepare(trans)) return 0; @@ -873,6 +876,9 @@ static int port_vlans_del(struct net_device *netdev, struct ethsw_port_priv *port_priv = netdev_priv(netdev); int vid, err; + if (netif_is_bridge_master(vlan->obj.orig_dev)) + return -EOPNOTSUPP; + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ethsw_port_del_vlan(port_priv, vid); if (err)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index eeaf6739215f..e7cf7d21cfb5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -32,6 +32,7 @@ #include <linux/skbuff.h> #include <net/sock.h> +#include <net/xdp.h> #include "vhost.h" @@ -45,8 +46,10 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;" #define VHOST_NET_WEIGHT 0x80000 /* Max number of packets transferred before requeueing the job. - * Using this limit prevents one virtqueue from starving rx. */ -#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2) + * Using this limit prevents one virtqueue from starving others with small + * pkts. + */ +#define VHOST_NET_PKT_WEIGHT 256 /* MAX number of TX used buffers for outstanding zerocopy */ #define VHOST_MAX_PEND 128 @@ -183,10 +186,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) static int vhost_net_buf_peek_len(void *ptr) { - if (tun_is_xdp_buff(ptr)) { - struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + if (tun_is_xdp_frame(ptr)) { + struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); - return xdp->data_end - xdp->data; + return xdpf->len; } return __skb_array_len_with_tag(ptr); @@ -588,7 +591,7 @@ static void handle_tx(struct vhost_net *net) vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); if (unlikely(total_len >= VHOST_NET_WEIGHT) || - unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) { + unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) { vhost_poll_queue(&vq->poll); break; } @@ -784,6 +787,7 @@ static void handle_rx(struct vhost_net *net) struct socket *sock; struct iov_iter fixup; __virtio16 num_buffers; + int recv_pkts = 0; mutex_lock_nested(&vq->mutex, 0); sock = vq->private_data; @@ -884,7 +888,8 @@ static void handle_rx(struct vhost_net *net) if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); total_len += vhost_len; - if (unlikely(total_len >= VHOST_NET_WEIGHT)) { + if (unlikely(total_len >= VHOST_NET_WEIGHT) || + unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) { vhost_poll_queue(&vq->poll); goto out; }
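The vhost change replaces the per-virtqueue multiplier with a flat 256-packet quota, so both the TX and RX passes are now bounded by packets as well as bytes and a flood of tiny packets can no longer monopolize the worker. A reduced sketch of that dual-quota loop shape (pull_one() and reschedule() are placeholders for the ring accessors and vhost_poll_queue()):

#include <stddef.h>

#define VHOST_NET_WEIGHT     0x80000	/* byte quota per worker pass */
#define VHOST_NET_PKT_WEIGHT 256	/* packet quota per worker pass */

/* Placeholders for this sketch: pull one buffer (0 == ring empty) and
 * reschedule the worker for another pass. */
static size_t pull_one(void) { return 0; }
static void reschedule(void) { }

static void service_pass(void)
{
	size_t total_len = 0, len;
	int pkts = 0;

	while ((len = pull_one()) != 0) {
		total_len += len;
		/* Whichever quota trips first ends the pass so other
		 * virtqueues get a turn. */
		if (total_len >= VHOST_NET_WEIGHT ||
		    ++pkts >= VHOST_NET_PKT_WEIGHT) {
			reschedule();
			break;
		}
	}
}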