Diffstat (limited to 'drivers/net')
205 files changed, 7524 insertions, 2765 deletions
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 604f54112665..06279ba64cc8 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, return; /* Enable flow control on BCM5301x's CPU port */ - if (is5301x(dev) && port == dev->cpu_port) + if (is5301x(dev) && dsa_is_cpu_port(ds, port)) tx_pause = rx_pause = true; if (phydev->pause) { @@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, return; } } - } else if (is5301x(dev)) { - if (port != dev->cpu_port) { - b53_force_port_config(dev, dev->cpu_port, 2000, - DUPLEX_FULL, true, true); - b53_force_link(dev, dev->cpu_port, 1); - } } /* Re-negotiate EEE if it was enabled already */ @@ -2302,33 +2296,30 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5325_DEVICE_ID, .dev_name = "BCM5325", .vlans = 16, - .enabled_ports = 0x1f, + .enabled_ports = 0x3f, .arl_bins = 2, .arl_buckets = 1024, .imp_port = 5, - .cpu_port = B53_CPU_PORT_25, .duplex_reg = B53_DUPLEX_STAT_FE, }, { .chip_id = BCM5365_DEVICE_ID, .dev_name = "BCM5365", .vlans = 256, - .enabled_ports = 0x1f, + .enabled_ports = 0x3f, .arl_bins = 2, .arl_buckets = 1024, .imp_port = 5, - .cpu_port = B53_CPU_PORT_25, .duplex_reg = B53_DUPLEX_STAT_FE, }, { .chip_id = BCM5389_DEVICE_ID, .dev_name = "BCM5389", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2338,11 +2329,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5395_DEVICE_ID, .dev_name = "BCM5395", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2352,11 +2342,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5397_DEVICE_ID, .dev_name = "BCM5397", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_9798, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2366,11 +2355,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM5398_DEVICE_ID, .dev_name = "BCM5398", .vlans = 4096, - .enabled_ports = 0x7f, + .enabled_ports = 0x17f, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_9798, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2380,12 +2368,11 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53115_DEVICE_ID, .dev_name = "BCM53115", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x11f, .arl_bins = 4, .arl_buckets = 1024, .vta_regs = B53_VTA_REGS, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, @@ -2394,11 +2381,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53125_DEVICE_ID, .dev_name = "BCM53125", .vlans = 4096, - .enabled_ports = 0xff, + .enabled_ports = 0x1ff, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = 
B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2412,7 +2398,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2426,7 +2411,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS_63XX, .duplex_reg = B53_DUPLEX_STAT_63XX, .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, @@ -2436,11 +2420,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53010_DEVICE_ID, .dev_name = "BCM53010", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2454,7 +2437,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2468,7 +2450,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2478,11 +2459,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53018_DEVICE_ID, .dev_name = "BCM53018", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2492,11 +2472,10 @@ static const struct b53_chip_data b53_switch_chips[] = { .chip_id = BCM53019_DEVICE_ID, .dev_name = "BCM53019", .vlans = 4096, - .enabled_ports = 0x1f, + .enabled_ports = 0x1bf, .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2510,7 +2489,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2524,7 +2502,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2539,7 +2516,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 256, .imp_port = 8, - .cpu_port = 8, /* TODO: ports 4, 5, 8 */ .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2553,7 +2529,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 1024, .imp_port = 8, - .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2567,7 +2542,6 @@ static const struct b53_chip_data b53_switch_chips[] = { .arl_bins = 4, .arl_buckets = 256, .imp_port = 8, - .cpu_port = B53_CPU_PORT, 
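/*
 * [Editor's note -- illustrative, not part of the patch.] The
 * .enabled_ports changes above fold the IMP/CPU port bit into each
 * chip's mask at table-definition time, replacing the runtime
 * "dev->enabled_ports |= BIT(dev->cpu_port);" that this patch removes:
 * BCM5325/BCM5365 with imp_port 5 go 0x1f -> 0x3f, and the imp_port 8
 * chips go 0x1f | BIT(8) == 0x11f (0xff -> 0x1ff for BCM53125).
 * The names below are made up for illustration:
 */
#define EXAMPLE_USER_PORTS	GENMASK(4, 0)	/* ports 0..4 == 0x01f */
#define EXAMPLE_IMP_PORT	8		/* e.g. BCM5389 */
#define EXAMPLE_ENABLED_PORTS	(EXAMPLE_USER_PORTS | BIT(EXAMPLE_IMP_PORT))	/* 0x11f */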
.vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, .jumbo_pm_reg = B53_JUMBO_PORT_MASK, @@ -2593,7 +2567,6 @@ static int b53_switch_init(struct b53_device *dev) dev->vta_regs[2] = chip->vta_regs[2]; dev->jumbo_pm_reg = chip->jumbo_pm_reg; dev->imp_port = chip->imp_port; - dev->cpu_port = chip->cpu_port; dev->num_vlans = chip->vlans; dev->num_arl_bins = chip->arl_bins; dev->num_arl_buckets = chip->arl_buckets; @@ -2625,16 +2598,8 @@ static int b53_switch_init(struct b53_device *dev) break; #endif } - } else if (dev->chip_id == BCM53115_DEVICE_ID) { - u64 strap_value; - - b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); - /* use second IMP port if GMII is enabled */ - if (strap_value & SV_GMII_CTRL_115) - dev->cpu_port = 5; } - dev->enabled_ports |= BIT(dev->cpu_port); dev->num_ports = fls(dev->enabled_ports); dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 959a52d41f0a..544101e74bca 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -124,7 +124,6 @@ struct b53_device { /* used ports mask */ u16 enabled_ports; unsigned int imp_port; - unsigned int cpu_port; /* connect specific data */ u8 current_page; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 7578a5c38df5..a86ddc4bb897 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) if (priv->int_phy_mask & BIT(port)) return priv->hw_params.gphy_rev; else - return 0; + return PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; } static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index a3a9636430d6..01c01bd0c854 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -955,8 +955,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) switch_node = dev->of_node; ports_node = of_get_child_by_name(switch_node, "ports"); + if (!ports_node) + ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); if (!ports_node) { - dev_err(dev, "Incorrect bindings: absent \"ports\" node\n"); + dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); return -ENODEV; } diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h index fcf465f7f922..214f710d7dd5 100644 --- a/drivers/net/dsa/realtek-smi-core.h +++ b/drivers/net/dsa/realtek-smi-core.h @@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable); int rtl8366_reset_vlan(struct realtek_smi *smi); -int rtl8366_init_vlan(struct realtek_smi *smi); -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack); int rtl8366_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack); diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 75897a369096..bdb8d8d34880 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi) } EXPORT_SYMBOL_GPL(rtl8366_reset_vlan); -int rtl8366_init_vlan(struct realtek_smi *smi) -{ - int port; - int 
ret; - - ret = rtl8366_reset_vlan(smi); - if (ret) - return ret; - - /* Loop over the available ports, for each port, associate - * it with the VLAN (port+1) - */ - for (port = 0; port < smi->num_ports; port++) { - u32 mask; - - if (port == smi->cpu_port) - /* For the CPU port, make all ports members of this - * VLAN. - */ - mask = GENMASK((int)smi->num_ports - 1, 0); - else - /* For all other ports, enable itself plus the - * CPU port. - */ - mask = BIT(port) | BIT(smi->cpu_port); - - /* For each port, set the port as member of VLAN (port+1) - * and untagged, except for the CPU port: the CPU port (5) is - * member of VLAN 6 and so are ALL the other ports as well. - * Use filter 0 (no filter). - */ - dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n", - (port + 1), port, mask); - ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0); - if (ret) - return ret; - - dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n", - (port + 1), port, (port + 1)); - ret = rtl8366_set_pvid(smi, port, (port + 1)); - if (ret) - return ret; - } - - return rtl8366_enable_vlan(smi, true); -} -EXPORT_SYMBOL_GPL(rtl8366_init_vlan); - -int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, - struct netlink_ext_ack *extack) -{ - struct realtek_smi *smi = ds->priv; - struct rtl8366_vlan_4k vlan4k; - int ret; - - /* Use VLAN nr port + 1 since VLAN0 is not valid */ - if (!smi->ops->is_vlan_valid(smi, port + 1)) - return -EINVAL; - - dev_info(smi->dev, "%s filtering on port %d\n", - vlan_filtering ? "enable" : "disable", - port); - - /* TODO: - * The hardware support filter ID (FID) 0..7, I have no clue how to - * support this in the driver when the callback only says on/off. - */ - ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k); - if (ret) - return ret; - - /* Just set the filter to FID 1 for now then */ - ret = rtl8366_set_vlan(smi, port + 1, - vlan4k.member, - vlan4k.untag, - 1); - if (ret) - return ret; - - return 0; -} -EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering); - int rtl8366_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) @@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port, return ret; } - dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n", - vlan->vid, port, untagged ? "untagged" : "tagged", - pvid ? " PVID" : "no PVID"); - - if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) - dev_err(smi->dev, "port is DSA or CPU port\n"); + dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n", + vlan->vid, port, untagged ? "untagged" : "tagged", + pvid ? "PVID" : "no PVID"); member |= BIT(port); @@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, struct realtek_smi *smi = ds->priv; int ret, i; - dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port); + dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port); for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, * anymore then clear the whole member * config so it can be reused. 
*/ - if (!vlanmc.member && vlanmc.untag) { + if (!vlanmc.member) { vlanmc.vid = 0; vlanmc.priority = 0; vlanmc.fid = 0; diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index a89093bc6c6a..bb9d017c2f9f 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -143,6 +143,21 @@ #define RTL8366RB_PHY_NO_OFFSET 9 #define RTL8366RB_PHY_NO_MASK (0x1f << 9) +/* VLAN Ingress Control Register 1, one bit per port. + * bit 0 .. 5 will make the switch drop ingress frames without + * VID such as untagged or priority-tagged frames for respective + * port. + * bit 6 .. 11 will make the switch drop ingress frames carrying + * a C-tag with VID != 0 for respective port. + */ +#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E +#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6)) + +/* VLAN Ingress Control Register 2, one bit per port. + * bit0 .. bit5 will make the switch drop all ingress frames with + * a VLAN classification that does not include the port is in its + * member set. + */ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f /* LED control registers */ @@ -300,6 +315,13 @@ #define RTL8366RB_INTERRUPT_STATUS_REG 0x0442 #define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */ +/* Port isolation registers */ +#define RTL8366RB_PORT_ISO_BASE 0x0F08 +#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum)) +#define RTL8366RB_PORT_ISO_EN BIT(0) +#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1) +#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1) + /* bits 0..5 enable force when cleared */ #define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11 @@ -314,9 +336,11 @@ /** * struct rtl8366rb - RTL8366RB-specific data * @max_mtu: per-port max MTU setting + * @pvid_enabled: if PVID is set for respective port */ struct rtl8366rb { unsigned int max_mtu[RTL8366RB_NUM_PORTS]; + bool pvid_enabled[RTL8366RB_NUM_PORTS]; }; static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { @@ -835,6 +859,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; + /* Isolate all user ports so they can only send packets to itself and the CPU port */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) | + RTL8366RB_PORT_ISO_EN); + if (ret) + return ret; + } + /* CPU port can send packets to all ports */ + ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU), + RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) | + RTL8366RB_PORT_ISO_EN); + if (ret) + return ret; + /* Set up the "green ethernet" feature */ ret = rtl8366rb_jam_table(rtl8366rb_green_jam, ARRAY_SIZE(rtl8366rb_green_jam), smi, false); @@ -911,11 +950,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; - /* Discard VLAN tagged packets if the port is not a member of - * the VLAN with which the packets is associated. 
- */ + /* Accept all packets by default, we enable filtering on-demand */ + ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, + 0); + if (ret) + return ret; ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, - RTL8366RB_PORT_ALL); + 0); if (ret) return ret; @@ -963,7 +1004,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds) return ret; } - ret = rtl8366_init_vlan(smi); + ret = rtl8366_reset_vlan(smi); if (ret) return ret; @@ -977,8 +1018,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds) return -ENODEV; } - ds->configure_vlan_while_not_filtering = false; - return 0; } @@ -1127,6 +1166,112 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port) rb8366rb_set_port_led(smi, port, false); } +static int +rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct realtek_smi *smi = ds->priv; + unsigned int port_bitmap = 0; + int ret, i; + + /* Loop over all other ports than the current one */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + /* Current port handled last */ + if (i == port) + continue; + /* Not on this bridge */ + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + /* Join this port to each other port on the bridge */ + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(port)), + RTL8366RB_PORT_ISO_PORTS(BIT(port))); + if (ret) + dev_err(smi->dev, "failed to join port %d\n", port); + + port_bitmap |= BIT(i); + } + + /* Set the bits for the ports we can access */ + return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), + RTL8366RB_PORT_ISO_PORTS(port_bitmap), + RTL8366RB_PORT_ISO_PORTS(port_bitmap)); +} + +static void +rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct realtek_smi *smi = ds->priv; + unsigned int port_bitmap = 0; + int ret, i; + + /* Loop over all other ports than this one */ + for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) { + /* Current port handled last */ + if (i == port) + continue; + /* Not on this bridge */ + if (dsa_to_port(ds, i)->bridge_dev != bridge) + continue; + /* Remove this port from any other port on the bridge */ + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i), + RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0); + if (ret) + dev_err(smi->dev, "failed to leave port %d\n", port); + + port_bitmap |= BIT(i); + } + + /* Clear the bits for the ports we can not access, leave ourselves */ + regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port), + RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0); +} + +/** + * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames + * @smi: SMI state container + * @port: the port to drop untagged and C-tagged frames on + * @drop: whether to drop or pass untagged and C-tagged frames + */ +static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop) +{ + return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG, + RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port), + drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0); +} + +static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, + struct netlink_ext_ack *extack) +{ + struct realtek_smi *smi = ds->priv; + struct rtl8366rb *rb; + int ret; + + rb = smi->chip_data; + + dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port, + vlan_filtering ? 
"enable" : "disable"); + + /* If the port is not in the member set, the frame will be dropped */ + ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, + BIT(port), vlan_filtering ? BIT(port) : 0); + if (ret) + return ret; + + /* If VLAN filtering is enabled and PVID is also enabled, we must + * not drop any untagged or C-tagged frames. If we turn off VLAN + * filtering on a port, we need to accept any frames. + */ + if (vlan_filtering) + ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]); + else + ret = rtl8366rb_drop_untagged(smi, port, false); + + return ret; +} + static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct realtek_smi *smi = ds->priv; @@ -1338,24 +1483,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val) static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index) { + struct rtl8366rb *rb; + bool pvid_enabled; + int ret; + + rb = smi->chip_data; + pvid_enabled = !!index; + if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS) return -EINVAL; - return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), + ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), RTL8366RB_PORT_VLAN_CTRL_MASK << RTL8366RB_PORT_VLAN_CTRL_SHIFT(port), (index & RTL8366RB_PORT_VLAN_CTRL_MASK) << RTL8366RB_PORT_VLAN_CTRL_SHIFT(port)); + if (ret) + return ret; + + rb->pvid_enabled[port] = pvid_enabled; + + /* If VLAN filtering is enabled and PVID is also enabled, we must + * not drop any untagged or C-tagged frames. Make sure to update the + * filtering setting. + */ + if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port))) + ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled); + + return ret; } static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan) { - unsigned int max = RTL8366RB_NUM_VLANS; + unsigned int max = RTL8366RB_NUM_VLANS - 1; if (smi->vlan4k_enabled) max = RTL8366RB_NUM_VIDS - 1; - if (vlan == 0 || vlan > max) + if (vlan > max) return false; return true; @@ -1510,7 +1675,9 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = { .get_strings = rtl8366_get_strings, .get_ethtool_stats = rtl8366_get_ethtool_stats, .get_sset_count = rtl8366_get_sset_count, - .port_vlan_filtering = rtl8366_vlan_filtering, + .port_bridge_join = rtl8366rb_port_bridge_join, + .port_bridge_leave = rtl8366rb_port_bridge_leave, + .port_vlan_filtering = rtl8366rb_vlan_filtering, .port_vlan_add = rtl8366_vlan_add, .port_vlan_del = rtl8366_vlan_del, .port_enable = rtl8366rb_port_enable, diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 5e5d24e7c02b..618c8d6a8be1 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -226,12 +226,10 @@ struct sja1105_private { bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS]; phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS]; bool fixed_link[SJA1105_MAX_NUM_PORTS]; - bool vlan_aware; unsigned long ucast_egress_floods; unsigned long bcast_egress_floods; const struct sja1105_info *info; size_t max_xfer_len; - struct gpio_desc *reset_gpio; struct spi_device *spidev; struct dsa_switch *ds; u16 bridge_pvid[SJA1105_MAX_NUM_PORTS]; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 7c0db80eff00..0f1bba0076a8 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -27,15 +27,29 @@ #define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull -static void 
sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len, - unsigned int startup_delay) +/* Configure the optional reset pin and bring up switch */ +static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len, + unsigned int startup_delay) { + struct gpio_desc *gpio; + + gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(gpio)) + return PTR_ERR(gpio); + + if (!gpio) + return 0; + gpiod_set_value_cansleep(gpio, 1); /* Wait for minimum reset pulse length */ msleep(pulse_len); gpiod_set_value_cansleep(gpio, 0); /* Wait until chip is ready after reset */ msleep(startup_delay); + + gpiod_put(gpio); + + return 0; } static void @@ -1766,6 +1780,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port, static int sja1105_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { + struct dsa_port *dp = dsa_to_port(ds, port); struct sja1105_private *priv = ds->priv; struct device *dev = ds->dev; int i; @@ -1802,7 +1817,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, u64_to_ether_addr(l2_lookup.macaddr, macaddr); /* We need to hide the dsa_8021q VLANs from the user. */ - if (!priv->vlan_aware) + if (!dsa_port_is_vlan_filtering(dp)) l2_lookup.vlanid = 0; rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); if (rc) @@ -2295,11 +2310,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, tpid2 = ETH_P_SJA1105; } - if (priv->vlan_aware == enabled) - return 0; - - priv->vlan_aware = enabled; - table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; general_params = table->entries; /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ @@ -2332,7 +2342,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, */ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS]; l2_lookup_params = table->entries; - l2_lookup_params->shared_learn = !priv->vlan_aware; + l2_lookup_params->shared_learn = !enabled; for (port = 0; port < ds->num_ports; port++) { if (dsa_is_unused_port(ds, port)) @@ -2965,7 +2975,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv) continue; dp->priv = sp; - sp->dp = dp; sp->data = tagger_data; slave = dp->slave; kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit); @@ -3117,7 +3126,7 @@ static void sja1105_teardown(struct dsa_switch *ds) sja1105_static_config_free(&priv->static_config); } -const struct dsa_switch_ops sja1105_switch_ops = { +static const struct dsa_switch_ops sja1105_switch_ops = { .get_tag_protocol = sja1105_get_tag_protocol, .setup = sja1105_setup, .teardown = sja1105_teardown, @@ -3166,7 +3175,6 @@ const struct dsa_switch_ops sja1105_switch_ops = { .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload, .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload, }; -EXPORT_SYMBOL_GPL(sja1105_switch_ops); static const struct of_device_id sja1105_dt_ids[]; @@ -3230,17 +3238,14 @@ static int sja1105_probe(struct spi_device *spi) return -EINVAL; } + rc = sja1105_hw_reset(dev, 1, 1); + if (rc) + return rc; + priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL); if (!priv) return -ENOMEM; - /* Configure the optional reset pin and bring up switch */ - priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); - if (IS_ERR(priv->reset_gpio)) - dev_dbg(dev, "reset-gpios not defined, ignoring\n"); - else - sja1105_hw_reset(priv->reset_gpio, 1, 1); - /* Populate our driver private structure (priv) based on * the device tree node that was probed (spi) */ diff 
--git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 691f6dd7e669..54396992a919 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -64,6 +64,7 @@ enum sja1105_ptp_clk_mode { static int sja1105_change_rxtstamping(struct sja1105_private *priv, bool on) { + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; struct sja1105_general_params_entry *general_params; struct sja1105_table *table; @@ -79,7 +80,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv, priv->tagger_data.stampable_skb = NULL; } ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&ptp_data->skb_txtstamp_queue); + skb_queue_purge(&tagger_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING); @@ -452,40 +453,6 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port, return priv->info->rxtstamp(ds, port, skb); } -void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id, - enum sja1110_meta_tstamp dir, u64 tstamp) -{ - struct sja1105_private *priv = ds->priv; - struct sja1105_ptp_data *ptp_data = &priv->ptp_data; - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; - struct skb_shared_hwtstamps shwt = {0}; - - /* We don't care about RX timestamps on the CPU port */ - if (dir == SJA1110_META_TSTAMP_RX) - return; - - spin_lock(&ptp_data->skb_txtstamp_queue.lock); - - skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) { - if (SJA1105_SKB_CB(skb)->ts_id != ts_id) - continue; - - __skb_unlink(skb, &ptp_data->skb_txtstamp_queue); - skb_match = skb; - - break; - } - - spin_unlock(&ptp_data->skb_txtstamp_queue.lock); - - if (WARN_ON(!skb_match)) - return; - - shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp)); - skb_complete_tx_timestamp(skb_match, &shwt); -} -EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp); - /* In addition to cloning the skb which is done by the common * sja1105_port_txtstamp, we need to generate a timestamp ID and save the * packet to the TX timestamping queue. @@ -494,7 +461,6 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; struct sja1105_private *priv = ds->priv; - struct sja1105_ptp_data *ptp_data = &priv->ptp_data; struct sja1105_port *sp = &priv->ports[port]; u8 ts_id; @@ -510,7 +476,7 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) spin_unlock(&sp->data->meta_lock); - skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone); + skb_queue_tail(&sp->data->skb_txtstamp_queue, clone); } /* Called from dsa_skb_tx_timestamp. 
This callback is just to clone @@ -953,7 +919,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) /* Only used on SJA1105 */ skb_queue_head_init(&ptp_data->skb_rxtstamp_queue); /* Only used on SJA1110 */ - skb_queue_head_init(&ptp_data->skb_txtstamp_queue); + skb_queue_head_init(&tagger_data->skb_txtstamp_queue); spin_lock_init(&tagger_data->meta_lock); ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev); @@ -971,6 +937,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) void sja1105_ptp_clock_unregister(struct dsa_switch *ds) { struct sja1105_private *priv = ds->priv; + struct sja1105_tagger_data *tagger_data = &priv->tagger_data; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; if (IS_ERR_OR_NULL(ptp_data->clock)) @@ -978,7 +945,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) del_timer_sync(&ptp_data->extts_timer); ptp_cancel_worker_sync(ptp_data->clock); - skb_queue_purge(&ptp_data->skb_txtstamp_queue); + skb_queue_purge(&tagger_data->skb_txtstamp_queue); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); ptp_clock_unregister(ptp_data->clock); ptp_data->clock = NULL; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 3c874bb4c17b..3ae6b9fdd492 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -8,21 +8,6 @@ #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) -/* Timestamps are in units of 8 ns clock ticks (equivalent to - * a fixed 125 MHz clock). - */ -#define SJA1105_TICK_NS 8 - -static inline s64 ns_to_sja1105_ticks(s64 ns) -{ - return ns / SJA1105_TICK_NS; -} - -static inline s64 sja1105_ticks_to_ns(s64 ticks) -{ - return ticks * SJA1105_TICK_NS; -} - /* Calculate the first base_time in the future that satisfies this * relationship: * @@ -77,10 +62,6 @@ struct sja1105_ptp_data { struct timer_list extts_timer; /* Used only on SJA1105 to reconstruct partial timestamps */ struct sk_buff_head skb_rxtstamp_queue; - /* Used on SJA1110 where meta frames are generated only for - * 2-step TX timestamps - */ - struct sk_buff_head skb_txtstamp_queue; struct ptp_clock_info caps; struct ptp_clock *clock; struct sja1105_ptp_cmd cmd; diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index 6802f4057cc0..d55572994e1f 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -494,13 +494,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port, bool append) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); + struct dsa_port *dp = dsa_to_port(priv->ds, port); + bool vlan_aware = dsa_port_is_vlan_filtering(dp); int rc; - if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on DMAC"); return -EOPNOTSUPP; - } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only redirect based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; @@ -568,6 +570,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, u32 num_entries, struct action_gate_entry *entries) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); + struct dsa_port *dp = dsa_to_port(priv->ds, port); + bool vlan_aware = dsa_port_is_vlan_filtering(dp); int ipv = -1; int i, rc; s32 rem; @@ -592,11 +596,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port, 
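/*
 * [Editor's note -- illustrative sketch.] This hunk and the
 * sja1105_vl_redirect() hunk above replace the cached priv->vlan_aware
 * flag with a direct query to the DSA core; the whole pattern is:
 */
static bool example_port_is_vlan_aware(struct dsa_switch *ds, int port)
{
	/* dsa_to_port() and dsa_port_is_vlan_filtering() are the real
	 * helpers from <net/dsa.h>; only the wrapper name is made up.
	 */
	return dsa_port_is_vlan_filtering(dsa_to_port(ds, port));
}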
return -ERANGE; } - if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { + if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on DMAC"); return -EOPNOTSUPP; - } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { + } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) { NL_SET_ERR_MSG_MOD(extack, "Can only gate based on {DMAC, VID, PCP}"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 54cdafdd067d..9acf589b1178 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset", GPIOD_OUT_LOW); if (IS_ERR(data->reset_gpio)) { - error = PTR_ERR(data->reset_gpio); - dev_err(priv->dev, "Failed to request gpio: %d\n", error); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio), + "Failed to request gpio\n"); } of_property_read_u32(np, "phy-reset-duration", &data->msec); @@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv) error = of_mdiobus_register(bus, priv->dev->of_node); if (error) { - dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name); mdiobus_free(bus); - return error; + return dev_err_probe(priv->dev, error, + "cannot register MDIO bus %s\n", bus->name); } return 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3b51b172b317..1c258e4ddc96 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, buffer_info->skb = NULL; buffer_info->length = 0; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); - netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed"); break; } buffer_info->dma = mapping; @@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1c chip can DMA to 64-bit addresses, but it uses a single diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 753973ac922e..2e22483a9040 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int err = 0; err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * The atl1e chip can DMA to 64-bit addresses, but it uses a single diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index fa784953c601..38b465452902 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1200,7 +1200,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp) bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size, &bp->rx_ring_dma, gfp); if (!bp->rx_ring) { - /* Allocation may have failed due to pci_alloc_consistent + /* Allocation may have failed due to dma_alloc_coherent insisting on use of GFP_DMA, which is more restrictive than necessary... */ struct dma_desc *rx_ring; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 62f84cc91e4d..0fba01db336c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -13370,7 +13370,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } bnxt_inv_fw_health_reg(bp); - bnxt_dl_register(bp); + rc = bnxt_dl_register(bp); + if (rc) + goto init_err_dl; rc = register_netdev(dev); if (rc) @@ -13390,6 +13392,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) init_err_cleanup: bnxt_dl_unregister(bp); +init_err_dl: bnxt_shutdown_tc(bp); bnxt_clear_int_mode(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 9576547df4ab..951c0c00cc95 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -134,7 +134,7 @@ void bnxt_dl_fw_reporters_create(struct bnxt *bp) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health) return; if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter) @@ -188,7 +188,7 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all) { struct bnxt_fw_health *health = bp->fw_health; - if (!bp->dl || !health) + if (!health) return; if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) && @@ -736,9 +736,6 @@ static const struct devlink_param bnxt_dl_params[] = { NULL), }; -static const struct devlink_param bnxt_dl_port_params[] = { -}; - static int bnxt_dl_params_register(struct bnxt *bp) { int rc; @@ -748,22 +745,10 @@ static int bnxt_dl_params_register(struct bnxt *bp) rc = devlink_params_register(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); - if (rc) { + if (rc) netdev_warn(bp->dev, "devlink_params_register failed. 
rc=%d\n", rc); - return rc; - } - rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); - if (rc) { - netdev_err(bp->dev, "devlink_port_params_register failed\n"); - devlink_params_unregister(bp->dl, bnxt_dl_params, - ARRAY_SIZE(bnxt_dl_params)); - return rc; - } - devlink_params_publish(bp->dl); - - return 0; + return rc; } static void bnxt_dl_params_unregister(struct bnxt *bp) @@ -773,14 +758,13 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) devlink_params_unregister(bp->dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); - devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, - ARRAY_SIZE(bnxt_dl_port_params)); } int bnxt_dl_register(struct bnxt *bp) { const struct devlink_ops *devlink_ops; struct devlink_port_attrs attrs = {}; + struct bnxt_dl *bp_dl; struct devlink *dl; int rc; @@ -795,21 +779,17 @@ int bnxt_dl_register(struct bnxt *bp) return -ENOMEM; } - bnxt_link_bp_to_dl(bp, dl); + bp->dl = dl; + bp_dl = devlink_priv(dl); + bp_dl->bp = bp; /* Add switchdev eswitch mode setting, if SRIOV supported */ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl); - if (rc) { - netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc); - goto err_dl_free; - } - if (!BNXT_PF(bp)) - return 0; + goto out; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = bp->pf.port_id; @@ -819,21 +799,20 @@ int bnxt_dl_register(struct bnxt *bp) rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed\n"); - goto err_dl_unreg; + goto err_dl_free; } rc = bnxt_dl_params_register(bp); if (rc) goto err_dl_port_unreg; +out: + devlink_register(dl); return 0; err_dl_port_unreg: devlink_port_unregister(&bp->dl_port); -err_dl_unreg: - devlink_unregister(dl); err_dl_free: - bnxt_link_bp_to_dl(bp, NULL); devlink_free(dl); return rc; } @@ -842,13 +821,10 @@ void bnxt_dl_unregister(struct bnxt *bp) { struct devlink *dl = bp->dl; - if (!dl) - return; - + devlink_unregister(dl); if (BNXT_PF(bp)) { bnxt_dl_params_unregister(bp); devlink_port_unregister(&bp->dl_port); } - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index d889f240da2b..406dc655a5fc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -20,19 +20,6 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) return ((struct bnxt_dl *)devlink_priv(dl))->bp; } -/* To clear devlink pointer from bp, pass NULL dl */ -static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) -{ - bp->dl = dl; - - /* add a back pointer in dl to bp */ - if (dl) { - struct bnxt_dl *bp_dl = devlink_priv(dl); - - bp_dl->bp = bp; - } -} - #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 #define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 #define NVM_OFF_IGNORE_ARI 164 diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 23c7595d2a1d..6a8234bc9428 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev, return 0; } +static void bcmgenet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv; + 
u32 umac_cmd; + + priv = netdev_priv(dev); + + epause->autoneg = priv->autoneg_pause; + + if (netif_carrier_ok(dev)) { + /* report active state when link is up */ + umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD); + epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE); + epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE); + } else { + /* otherwise report stored settings */ + epause->tx_pause = priv->tx_pause; + epause->rx_pause = priv->rx_pause; + } +} + +static int bcmgenet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev) + return -ENODEV; + + if (!phy_validate_pause(dev->phydev, epause)) + return -EINVAL; + + priv->autoneg_pause = !!epause->autoneg; + priv->tx_pause = !!epause->tx_pause; + priv->rx_pause = !!epause->rx_pause; + + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + + return 0; +} + /* standard ethtool support functions. */ enum bcmgenet_stat_type { BCMGENET_STAT_NETDEV = -1, @@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = bcmgenet_get_rxnfc, .set_rxnfc = bcmgenet_set_rxnfc, + .get_pauseparam = bcmgenet_get_pauseparam, + .set_pauseparam = bcmgenet_set_pauseparam, }; /* Power down the unimac, based on mode. */ @@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev) goto err_irq1; } + bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause); + bcmgenet_netif_start(dev); netif_tx_start_all_queues(dev); @@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev) */ cancel_work_sync(&priv->bcmgenet_irq_work); - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; - /* tx reclaim */ bcmgenet_tx_reclaim_all(dev); bcmgenet_fini_dma(priv); @@ -3950,6 +3991,11 @@ static int bcmgenet_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); + /* Set default pause parameters */ + priv->autoneg_pause = 1; + priv->tx_pause = 1; + priv->rx_pause = 1; + SET_NETDEV_DEV(dev, &pdev->dev); dev_set_drvdata(&pdev->dev, dev); dev->watchdog_timeo = 2 * HZ; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 0a6d91b0f0aa..1cc2838e52c6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -594,6 +594,9 @@ struct bcmgenet_priv { /* other misc variables */ struct bcmgenet_hw_params *hw_params; + unsigned autoneg_pause:1; + unsigned tx_pause:1; + unsigned rx_pause:1; /* MDIO bus variables */ wait_queue_head_t wq; @@ -606,10 +609,6 @@ struct bcmgenet_priv { bool clk_eee_enabled; /* PHY device variables */ - int old_link; - int old_speed; - int old_duplex; - int old_pause; phy_interface_t phy_interface; int phy_addr; int ext_phy; @@ -690,6 +689,7 @@ int bcmgenet_mii_init(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 89d16c587bb7..ad56f54eda0a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -25,92 +25,80 @@ #include 
"bcmgenet.h" -/* setup netdev link state when PHY link status change and - * update UMAC and RGMII block when link up - */ -void bcmgenet_mii_setup(struct net_device *dev) +static void bcmgenet_mac_config(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; - bool status_changed = false; - if (priv->old_link != phydev->link) { - status_changed = true; - priv->old_link = phydev->link; - } + /* speed */ + if (phydev->speed == SPEED_1000) + cmd_bits = CMD_SPEED_1000; + else if (phydev->speed == SPEED_100) + cmd_bits = CMD_SPEED_100; + else + cmd_bits = CMD_SPEED_10; + cmd_bits <<= CMD_SPEED_SHIFT; - if (phydev->link) { - /* check speed/duplex/pause changes */ - if (priv->old_speed != phydev->speed) { - status_changed = true; - priv->old_speed = phydev->speed; - } + /* duplex */ + if (phydev->duplex != DUPLEX_FULL) { + cmd_bits |= CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; + } else { + /* pause capability defaults to Symmetric */ + if (priv->autoneg_pause) { + bool tx_pause = 0, rx_pause = 0; - if (priv->old_duplex != phydev->duplex) { - status_changed = true; - priv->old_duplex = phydev->duplex; - } + if (phydev->autoneg) + phy_get_pause(phydev, &tx_pause, &rx_pause); - if (priv->old_pause != phydev->pause) { - status_changed = true; - priv->old_pause = phydev->pause; + if (!tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + if (!rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; } - /* done if nothing has changed */ - if (!status_changed) - return; - - /* speed */ - if (phydev->speed == SPEED_1000) - cmd_bits = CMD_SPEED_1000; - else if (phydev->speed == SPEED_100) - cmd_bits = CMD_SPEED_100; - else - cmd_bits = CMD_SPEED_10; - cmd_bits <<= CMD_SPEED_SHIFT; - - /* duplex */ - if (phydev->duplex != DUPLEX_FULL) - cmd_bits |= CMD_HD_EN; - - /* pause capability */ - if (!phydev->pause) - cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE; - - /* - * Program UMAC and RGMII block based on established - * link speed, duplex, and pause. The speed set in - * umac->cmd tell RGMII block which clock to use for - * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). - * Receive clock is provided by the PHY. - */ - reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); - reg &= ~OOB_DISABLE; - reg |= RGMII_LINK; - bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + /* Manual override */ + if (!priv->rx_pause) + cmd_bits |= CMD_RX_PAUSE_IGNORE; + if (!priv->tx_pause) + cmd_bits |= CMD_TX_PAUSE_IGNORE; + } - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | - CMD_HD_EN | - CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); - reg |= cmd_bits; - if (reg & CMD_SW_RESET) { - reg &= ~CMD_SW_RESET; - bcmgenet_umac_writel(priv, reg, UMAC_CMD); - udelay(2); - reg |= CMD_TX_EN | CMD_RX_EN; - } + /* Program UMAC and RGMII block based on established + * link speed, duplex, and pause. The speed set in + * umac->cmd tell RGMII block which clock to use for + * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps). + * Receive clock is provided by the PHY. 
+ */ + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); + reg &= ~OOB_DISABLE; + reg |= RGMII_LINK; + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | + CMD_HD_EN | + CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + if (reg & CMD_SW_RESET) { + reg &= ~CMD_SW_RESET; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - } else { - /* done if nothing has changed */ - if (!status_changed) - return; - - /* needed for MoCA fixed PHY to reflect correct link status */ - netif_carrier_off(dev); + udelay(2); + reg |= CMD_TX_EN | CMD_RX_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_CMD); +} +/* setup netdev link state when PHY link status change and + * update UMAC and RGMII block when link up + */ +void bcmgenet_mii_setup(struct net_device *dev) +{ + struct phy_device *phydev = dev->phydev; + + if (phydev->link) + bcmgenet_mac_config(dev); phy_print_status(phydev); } @@ -130,6 +118,21 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev, return 0; } +void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx) +{ + struct phy_device *phydev = dev->phydev; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising, + rx | tx); + phy_start_aneg(phydev); + + mutex_lock(&phydev->lock); + if (phydev->link) + bcmgenet_mac_config(dev); + mutex_unlock(&phydev->lock); +} + void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -286,23 +289,53 @@ int bcmgenet_mii_probe(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct device_node *dn = kdev->of_node; + phy_interface_t phy_iface = priv->phy_interface; struct phy_device *phydev; - u32 phy_flags = 0; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; int ret; /* Communicate the integrated PHY revision */ if (priv->internal_phy) phy_flags = priv->gphy_rev; - /* Initialize link state variables that bcmgenet_mii_setup() uses */ - priv->old_link = -1; - priv->old_speed = -1; - priv->old_duplex = -1; - priv->old_pause = -1; + /* This is an ugly quirk but we have not been correctly interpreting + * the phy_interface values and we have done that across different + * drivers, so at least we are consistent in our mistakes. + * + * When the Generic PHY driver is in use either the PHY has been + * strapped or programmed correctly by the boot loader so we should + * stick to our incorrect interpretation since we have validated it. + * + * Now when a dedicated PHY driver is in use, we need to reverse the + * meaning of the phy_interface_mode values to something that the PHY + * driver will interpret and act on such that we have two mistakes + * canceling themselves so to speak. We only do this for the two + * modes that GENET driver officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other + * modes are not *officially* supported with the boot loader and the + * scripted environment generating Device Tree blobs for those + * platforms. + * + * Note that internal PHY, MoCA and fixed-link configurations are not + * affected because they use different phy_interface_t values or the + * Generic PHY driver. 
+ */ + switch (priv->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } if (dn) { phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, - phy_flags, priv->phy_interface); + phy_flags, phy_iface); if (!phydev) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -332,7 +365,7 @@ int bcmgenet_mii_probe(struct net_device *dev) phydev->dev_flags = phy_flags; ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, - priv->phy_interface); + phy_iface); if (ret) { pr_err("could not attach to PHY\n"); return -ENODEV; @@ -350,8 +383,6 @@ int bcmgenet_mii_probe(struct net_device *dev) return ret; } - linkmode_copy(phydev->advertising, phydev->supported); - /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs. On GENETv5 there is a hardware issue * that prevents the signaling of link UP interrupts when diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index d8d87213697c..5620b97b3482 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -243,9 +243,11 @@ #define MACB_NCR_TPF_SIZE 1 #define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */ #define MACB_TZQ_SIZE 1 -#define MACB_SRTSM_OFFSET 15 -#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ +#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */ +#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */ #define MACB_OSSMODE_SIZE 1 +#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */ +#define MACB_MIIONRGMII_SIZE 1 /* Bitfields in NCFGR */ #define MACB_SPD_OFFSET 0 /* Speed */ @@ -713,6 +715,7 @@ #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 +#define MACB_CAPS_MIIONRGMII 0x00000200 #define MACB_CAPS_CLK_HW_CHG 0x04000000 #define MACB_CAPS_MACB_IS_EMAC 0x08000000 #define MACB_CAPS_FIFO_MODE 0x10000000 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d13fb1d31821..e2730b3e1a57 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -684,6 +684,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) { ctrl |= GEM_BIT(PCSSEL); ncr |= GEM_BIT(ENABLE_HS_MAC); + } else if (bp->caps & MACB_CAPS_MIIONRGMII && + bp->phy_interface == PHY_INTERFACE_MODE_MII) { + ncr |= MACB_BIT(MIIONRGMII); } } @@ -4594,7 +4597,8 @@ static const struct macb_config zynq_config = { }; static const struct macb_config sama7g5_gem_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG | + MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -4602,7 +4606,8 @@ static const struct macb_config sama7g5_gem_config = { }; static const struct macb_config sama7g5_emac_config = { - .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | + MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index c2e1f163bb14..095c5a2144a7 100644 
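/*
 * [Editor's note -- generic sketch, example_* names are made up.] The
 * macb_ptp.c change below converts .gettime64 to .gettimex64: the
 * hardware read that latches the time is bracketed with system-clock
 * snapshots (ptp_read_system_prets()/ptp_read_system_postts(), which
 * tolerate a NULL sts) so user space can correlate PHC and system time:
 */
static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	ptp_read_system_prets(sts);		/* system time just before */
	ts->tv_nsec = example_read_hw_nsec();	/* hypothetical register read */
	ptp_read_system_postts(sts);		/* system time just after */
	ts->tv_sec = example_read_hw_sec();	/* hypothetical */
	return 0;
}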
--- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, return NULL; } -static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct macb *bp = container_of(ptp, struct macb, ptp_clock_info); unsigned long flags; @@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) u32 secl, sech; spin_lock_irqsave(&bp->tsu_clk_lock, flags); + ptp_read_system_prets(sts); first = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); second = gem_readl(bp, TN); @@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) /* if so, use later read & re-read seconds * (assume all done within 1s) */ + ptp_read_system_prets(sts); ts->tv_nsec = gem_readl(bp, TN); + ptp_read_system_postts(sts); secl = gem_readl(bp, TSL); sech = gem_readl(bp, TSH); } else { @@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } if (delta > TSU_NSEC_MAX_VAL) { - gem_tsu_get_time(&bp->ptp_clock_info, &now); + gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL); now = timespec64_add(now, then); gem_tsu_set_time(&bp->ptp_clock_info, @@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = { .pps = 1, .adjfine = gem_ptp_adjfine, .adjtime = gem_ptp_adjtime, - .gettime64 = gem_tsu_get_time, + .gettimex64 = gem_tsu_get_time, .settime64 = gem_tsu_set_time, .enable = gem_ptp_enable, }; @@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1, * The timestamp only contains lower few bits of seconds, * so add value from 1588 timer */ - gem_tsu_get_time(&bp->ptp_clock_info, &tsu); + gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL); /* If the top bit is set in the timestamp, * but not in 1588 timer, it has rolled over, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 2907e13b9df6..dafc79bd34f4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1279,6 +1279,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) struct lio *lio; dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + device_lock(&oct->pci_dev->dev); + if (oct->devlink) { + devlink_unregister(oct->devlink); + devlink_free(oct->devlink); + oct->devlink = NULL; + } + device_unlock(&oct->pci_dev->dev); + if (!oct->ifcount) { dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); return 1; @@ -1300,12 +1308,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) liquidio_destroy_nic_device(oct, i); - if (oct->devlink) { - devlink_unregister(oct->devlink); - devlink_free(oct->devlink); - oct->devlink = NULL; - } - dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); return 0; } @@ -3749,10 +3751,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } } + device_lock(&octeon_dev->pci_dev->dev); devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv), &octeon_dev->pci_dev->dev); if (!devlink) { + device_unlock(&octeon_dev->pci_dev->dev); dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); goto setup_nic_dev_free; } @@ -3760,15 +3764,10 @@ static int 
setup_nic_devices(struct octeon_device *octeon_dev) lio_devlink = devlink_priv(devlink); lio_devlink->oct = octeon_dev; - if (devlink_register(devlink)) { - devlink_free(devlink); - dev_err(&octeon_dev->pci_dev->dev, - "devlink registration failed\n"); - goto setup_nic_dev_free; - } - octeon_dev->devlink = devlink; octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_register(devlink); + device_unlock(&octeon_dev->pci_dev->dev); return 0; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 691e1475d55e..b3d7d1afced7 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index d1667b759522..2b87565781a0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2119,10 +2119,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } err = pci_enable_device(pdev); - if (err) { - dev_err(dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index c36fed9c3d73..db66d4beb28a 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pcim_enable_device(pdev); if (err) { - dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); - return err; + return dev_err_probe(dev, err, "Failed to enable PCI device\n"); } err = pci_request_regions(pdev, DRV_NAME); diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c index 873c1c7b4ca0..a19284bdb80e 100644 --- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c +++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c @@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac) } hw_stats[] = { #define HW_STAT(reg, stat_name) \ - { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL } + { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) } /* Rx stats */ HW_STAT(RxUnicast, RxUnicastFramesOK), diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 49b76fd47daa..4920a80a0460 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize generic PCI device state. */ err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "cannot enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); /* * Reserve PCI resources for the device. 
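
The probe-path cleanups in this series collapse the usual dev_err()-plus-return pair into dev_err_probe(), which logs the message, returns the error it was given, and for -EPROBE_DEFER suppresses the log and records the deferral reason instead. A condensed sketch (foo_probe is a hypothetical driver, not one of the converted ones):

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "Failed to enable PCI device\n");

	/* ... remainder of probe ... */
	return 0;
}
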
If we can't get them some diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 12ffc14fbecd..6ded4d9fa32a 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev, int err; err = enic_dev_fw_info(enic, &fw_info); - /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info + /* return only when dma_alloc_coherent fails in vnic_dev_fw_info * For other failures, like devcmd failure, we return previously * recorded info. */ @@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &vstats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. */ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d0a8f7106958..13dbdd5d07b4 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev, int err; err = enic_dev_stats_dump(enic, &stats); - /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump + /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump * For other failures, like devcmd failure, we return previously * recorded stats. */ diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index ed1ed48e7483..0064ebdaf4b4 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev) else phy = phy_find_first(priv->mdio); - if (!phy) { - dev_err(&dev->dev, "no PHY found\n"); - return -ENXIO; - } + if (!phy) + return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n"); priv->old_duplex = -1; priv->old_link = -1; err = phy_connect_direct(dev, phy, ethoc_mdio_poll, PHY_INTERFACE_MODE_GMII); - if (err) { - dev_err(&dev->dev, "could not attach to PHY\n"); - return err; - } + if (err) + return dev_err_probe(&dev->dev, err, "could not attach to PHY\n"); phy_set_max_speed(phy, SPEED_100); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 605a39f892b9..7fefe1574b6a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = { .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set, }; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv) { struct net_device *net_dev = priv->net_dev; struct device *dev = net_dev->dev.parent; struct dpaa2_eth_devlink_priv *dl_priv; - int err; priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev); @@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) } dl_priv = devlink_priv(priv->devlink); dl_priv->dpaa2_priv = priv; - - err = devlink_register(priv->devlink); - if (err) { - dev_err(dev, "devlink_register() = %d\n", err); - goto devlink_free; - } - return 0; +} -devlink_free: +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv) +{ devlink_free(priv->devlink); +} - return err; + +void 
dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) +{ + devlink_register(priv->devlink); } void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv) { devlink_unregister(priv->devlink); - devlink_free(priv->devlink); } int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 7065c71ed7b8..03c168b1712f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -4431,7 +4431,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) if (err) goto err_connect_mac; - err = dpaa2_eth_dl_register(priv); + err = dpaa2_eth_dl_alloc(priv); if (err) goto err_dl_register; @@ -4453,6 +4453,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) dpaa2_dbg_add(priv); #endif + dpaa2_eth_dl_register(priv); dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; @@ -4461,7 +4462,7 @@ err_netdev_reg: err_dl_port_add: dpaa2_eth_dl_traps_unregister(priv); err_dl_trap_register: - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); err_dl_register: dpaa2_eth_disconnect_mac(priv); err_connect_mac: @@ -4508,6 +4509,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); + dpaa2_eth_dl_unregister(priv); + #ifdef CONFIG_DEBUG_FS dpaa2_dbg_remove(priv); #endif @@ -4519,7 +4522,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) dpaa2_eth_dl_port_del(priv); dpaa2_eth_dl_traps_unregister(priv); - dpaa2_eth_dl_unregister(priv); + dpaa2_eth_dl_free(priv); if (priv->do_link_poll) kthread_stop(priv->poll_thread); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index cdb623d5f2c1..628d2d45f045 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -725,7 +725,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops; -int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); +int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv); +void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv); + +void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv); void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv); int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c index ae6d382d8735..543c1f202420 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c @@ -140,6 +140,11 @@ static void dpaa2_mac_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_USXGMII: phylink_set(mask, 10000baseT_Full); + phylink_set(mask, 10000baseCR_Full); + phylink_set(mask, 10000baseSR_Full); + phylink_set(mask, 10000baseLR_Full); + phylink_set(mask, 10000baseLRM_Full); + phylink_set(mask, 10000baseER_Full); if (state->interface == PHY_INTERFACE_MODE_10GBASER) break; phylink_set(mask, 5000baseT_Full); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 042327b9981f..3cbfa8b4e265 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2607,10 +2607,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) pcie_flr(pdev); err = 
pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4c977dfc44f0..8c6fa1345996 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -803,10 +803,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np) snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); err = of_mdiobus_register(bus, np); - if (err) { - dev_err(dev, "cannot register MDIO bus\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, "cannot register MDIO bus\n"); pf->mdio = bus; @@ -1215,10 +1213,8 @@ static int enetc_pf_probe(struct pci_dev *pdev, ERR_PTR(err)); err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); if (!si->hw.port || !si->hw.global) { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c index bc594892507a..36b4f51dd297 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, } err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "device enable failed\n"); /* set up for high or low dma */ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 1a9d1e8b772c..0704f6bf12fd 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -143,10 +143,8 @@ static int enetc_vf_probe(struct pci_dev *pdev, int err; err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); - if (err) { - dev_err(&pdev->dev, "PCI probing failed\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "PCI probing failed\n"); si = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 1d3188e8e3b3..85bf825606e8 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -224,11 +224,6 @@ struct gve_tx_iovec { u32 iov_padding; /* padding associated with this segment */ }; -struct gve_tx_dma_buf { - DEFINE_DMA_UNMAP_ADDR(dma); - DEFINE_DMA_UNMAP_LEN(len); -}; - /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc * ring entry but only used for a pkt_desc not a seg_desc */ @@ -236,7 +231,10 @@ struct gve_tx_buffer_state { struct sk_buff *skb; /* skb for this pkt */ union { struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */ - struct gve_tx_dma_buf buf; + struct { + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + }; }; }; @@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo { * All others correspond to `skb`'s frags and should be unmapped with * `dma_unmap_page`. 
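
The gve rework in this area leans on the fact that DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() only expand to real struct members when CONFIG_NEED_DMA_MAP_STATE is set; otherwise they expand to nothing and cost no memory. The fields must therefore be touched only through the accessor macros, and embedding them directly in the containing struct (as above) lets callers pass the outer object instead of a nested helper struct. A small self-contained sketch with hypothetical names:

#include <linux/dma-mapping.h>

struct buf_state {
	DEFINE_DMA_UNMAP_ADDR(dma);	/* dma_addr_t dma - or nothing */
	DEFINE_DMA_UNMAP_LEN(len);	/* u32 len - or nothing */
};

static int map_one(struct device *dev, struct buf_state *bs,
		   void *cpu_addr, size_t size)
{
	dma_addr_t addr = dma_map_single(dev, cpu_addr, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* the members may not exist, so only the accessors are safe */
	dma_unmap_addr_set(bs, dma, addr);
	dma_unmap_len_set(bs, len, size);
	return 0;
}

static void unmap_one(struct device *dev, struct buf_state *bs)
{
	dma_unmap_single(dev, dma_unmap_addr(bs, dma),
			 dma_unmap_len(bs, len), DMA_TO_DEVICE);
	dma_unmap_len_set(bs, len, 0);
}
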
*/ - struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1]; + DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]); + DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]); u16 num_bufs; /* Linked list index to next element in the list, or -1 if none */ diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 099a2bc5ae67..cd9df68cc01e 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -268,7 +268,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv) int i, j; int err; - priv->msix_vectors = kvzalloc(num_vecs_requested * + priv->msix_vectors = kvcalloc(num_vecs_requested, sizeof(*priv->msix_vectors), GFP_KERNEL); if (!priv->msix_vectors) return -ENOMEM; @@ -628,7 +628,7 @@ static int gve_alloc_rings(struct gve_priv *priv) int err; /* Setup tx rings */ - priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx), + priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), GFP_KERNEL); if (!priv->tx) return -ENOMEM; @@ -641,7 +641,7 @@ static int gve_alloc_rings(struct gve_priv *priv) goto free_tx; /* Setup rx rings */ - priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx), + priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), GFP_KERNEL); if (!priv->rx) { err = -ENOMEM; @@ -764,12 +764,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, qpl->id = id; qpl->num_entries = 0; - qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL); + qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); /* caller handles clean up */ if (!qpl->pages) return -ENOMEM; - qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses), - GFP_KERNEL); + qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL); /* caller handles clean up */ if (!qpl->page_buses) return -ENOMEM; @@ -828,7 +827,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) if (priv->queue_format == GVE_GQI_RDA_FORMAT) return 0; - priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL); + priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); if (!priv->qpls) return -ENOMEM; @@ -847,7 +846,7 @@ static int gve_alloc_qpls(struct gve_priv *priv) priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * sizeof(unsigned long) * BITS_PER_BYTE; - priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) * + priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), sizeof(unsigned long), GFP_KERNEL); if (!priv->qpl_cfg.qpl_id_map) { err = -ENOMEM; diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 665ac795a1ad..9922ce46a635 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -303,15 +303,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx, static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info) { if (info->skb) { - dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_single(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } else { - dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma), - dma_unmap_len(&info->buf, len), + dma_unmap_page(dev, dma_unmap_addr(info, dma), + dma_unmap_len(info, len), DMA_TO_DEVICE); - dma_unmap_len_set(&info->buf, len, 0); + dma_unmap_len_set(info, len, 0); } } @@ -491,7 +491,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv 
*priv, struct gve_tx_ring *tx, struct gve_tx_buffer_state *info; bool is_gso = skb_is_gso(skb); u32 idx = tx->req & tx->mask; - struct gve_tx_dma_buf *buf; u64 addr; u32 len; int i; @@ -515,9 +514,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto drop; } - buf = &info->buf; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(info, len, len); + dma_unmap_addr_set(info, dma, addr); payload_nfrags = shinfo->nr_frags; if (hlen < len) { @@ -549,10 +547,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx, tx->dma_mapping_error++; goto unmap_drop; } - buf = &tx->info[idx].buf; tx->info[idx].skb = NULL; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); + dma_unmap_len_set(&tx->info[idx], len, len); + dma_unmap_addr_set(&tx->info[idx], dma, addr); gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr); } diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 05ddb6a75c38..ec394d991668 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) int j; for (j = 0; j < cur_state->num_bufs; j++) { - struct gve_tx_dma_buf *buf = &cur_state->bufs[j]; - if (j == 0) { dma_unmap_single(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } else { dma_unmap_page(tx->dev, - dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), - DMA_TO_DEVICE); + dma_unmap_addr(cur_state, dma[j]), + dma_unmap_len(cur_state, len[j]), + DMA_TO_DEVICE); } } if (cur_state->skb) { @@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, const bool is_gso = skb_is_gso(skb); u32 desc_idx = tx->dqo_tx.tail; - struct gve_tx_pending_packet_dqo *pending_packet; + struct gve_tx_pending_packet_dqo *pkt; struct gve_tx_metadata_dqo metadata; s16 completion_tag; int i; - pending_packet = gve_alloc_pending_packet(tx); - pending_packet->skb = skb; - pending_packet->num_bufs = 0; - completion_tag = pending_packet - tx->dqo.pending_packets; + pkt = gve_alloc_pending_packet(tx); + pkt->skb = skb; + pkt->num_bufs = 0; + completion_tag = pkt - tx->dqo.pending_packets; gve_extract_tx_metadata_dqo(skb, &metadata); if (is_gso) { @@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, /* Map the linear portion of skb */ { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; u32 len = skb_headlen(skb); dma_addr_t addr; @@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, @@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, } for (i = 0; i < shinfo->nr_frags; i++) { - struct gve_tx_dma_buf *buf = - &pending_packet->bufs[pending_packet->num_bufs]; const skb_frag_t *frag = &shinfo->frags[i]; bool is_eop = i == (shinfo->nr_frags - 1); u32 len = skb_frag_size(frag); @@ -523,9 +517,9 @@ static int 
gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, if (unlikely(dma_mapping_error(tx->dev, addr))) goto err; - dma_unmap_len_set(buf, len, len); - dma_unmap_addr_set(buf, dma, addr); - ++pending_packet->num_bufs; + dma_unmap_len_set(pkt, len[pkt->num_bufs], len); + dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr); + ++pkt->num_bufs; gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr, completion_tag, is_eop, is_gso); @@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx, return 0; err: - for (i = 0; i < pending_packet->num_bufs; i++) { - struct gve_tx_dma_buf *buf = &pending_packet->bufs[i]; - + for (i = 0; i < pkt->num_bufs; i++) { if (i == 0) { - dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), + dma_unmap_single(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } else { - dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_page(tx->dev, + dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), + DMA_TO_DEVICE); } } - pending_packet->skb = NULL; - pending_packet->num_bufs = 0; - gve_free_pending_packet(tx, pending_packet); + pkt->skb = NULL; + pkt->num_bufs = 0; + gve_free_pending_packet(tx, pkt); return -1; } @@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list, static void remove_from_list(struct gve_tx_ring *tx, struct gve_index_list *list, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { s16 prev_index, next_index; - prev_index = pending_packet->prev; - next_index = pending_packet->next; + prev_index = pkt->prev; + next_index = pkt->next; if (prev_index == -1) { /* Node is head */ @@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx, } static void gve_unmap_packet(struct device *dev, - struct gve_tx_pending_packet_dqo *pending_packet) + struct gve_tx_pending_packet_dqo *pkt) { - struct gve_tx_dma_buf *buf; int i; /* SKB linear portion is guaranteed to be mapped */ - buf = &pending_packet->bufs[0]; - dma_unmap_single(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); - for (i = 1; i < pending_packet->num_bufs; i++) { - buf = &pending_packet->bufs[i]; - dma_unmap_page(dev, dma_unmap_addr(buf, dma), - dma_unmap_len(buf, len), DMA_TO_DEVICE); + dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]), + dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE); + for (i = 1; i < pkt->num_bufs; i++) { + dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]), + dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE); } - pending_packet->num_bufs = 0; + pkt->num_bufs = 0; } /* Completion types and expected behavior: diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8ba21d6dc220..17872c50b63c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS { HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, }; #define hnae3_dev_fd_supported(hdev) \ @@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS { #define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps) +#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@ -341,6 +345,8 @@ struct 
hnae3_dev_specs { u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ u16 max_frm_size; u16 max_qset_num; + u16 umv_size; + u16 mc_mac_size; }; struct hnae3_client_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 2b66c59f5eaf..a1555f074e06 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -924,6 +924,10 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos) dev_specs->max_tm_rate); *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); } static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 33244472e0d0..bfcfefa9d2b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -1188,7 +1188,10 @@ struct hclge_dev_specs_1_cmd { __le16 max_frm_size; __le16 max_qset_num; __le16 max_int_gl; - u8 rsv1[18]; + u8 rsv0[2]; + __le16 umv_size; + __le16 mc_mac_size; + u8 rsv1[12]; }; /* mac speed type defined in firmware command */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 32f62cd2dd99..f0aa4fbd2200 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -1992,6 +1992,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len) } mutex_unlock(&hdev->vport_lock); + pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c index e4aad695abcc..59b0ae7d59e0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev) struct pci_dev *pdev = hdev->pdev; struct hclge_devlink_priv *priv; struct devlink *devlink; - int ret; devlink = devlink_alloc(&hclge_devlink_ops, sizeof(struct hclge_devlink_priv), &pdev->dev); @@ -120,20 +119,9 @@ int hclge_devlink_init(struct hclge_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - + devlink_register(devlink); devlink_reload_enable(devlink); - return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclge_devlink_uninit(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f5b8d1fee0f1..cd981b33d4ff 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1342,8 +1342,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_UMV_TBL_SPACE_M, HCLGE_CFG_UMV_TBL_SPACE_S); - if (!cfg->umv_space) - 
cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF; cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), HCLGE_CFG_PF_RSS_SIZE_M, @@ -1419,6 +1417,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev) ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@ -1440,6 +1439,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev, ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); } static void hclge_check_dev_specs(struct hclge_dev *hdev) @@ -1460,6 +1461,8 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev) dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; } static int hclge_query_dev_specs(struct hclge_dev *hdev) @@ -1549,7 +1552,10 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; - hdev->wanted_umv_size = cfg.umv_space; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) @@ -8498,6 +8504,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); + if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + return 0; } @@ -8515,6 +8524,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev) hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; } static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) @@ -8769,6 +8780,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; + bool is_new_addr = false; int status; /* mac addr check */ @@ -8782,6 +8794,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + /* This mac addr do not exist, add new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); @@ -8791,12 +8810,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ - if (status == -ENOSPC && - !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) - dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + if (status == -ENOSPC) + goto err_no_space; + else if 
(!status && is_new_addr) + hdev->used_mc_mac_num++; return status; + +err_no_space: + /* if already overflow, not to print each time */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + return -ENOSPC; } static int hclge_rm_mc_addr(struct hnae3_handle *handle, @@ -8833,12 +8858,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, if (status) return status; - if (hclge_is_all_function_id_zero(desc)) + if (hclge_is_all_function_id_zero(desc)) { /* All the vfid is zero, so need to delete this entry */ status = hclge_remove_mac_vlan_tbl(vport, &req); - else + if (!status) + hdev->used_mc_mac_num--; + } else { /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } } else if (status == -ENOENT) { status = 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index de6afbcbfbac..ca25e2edf3f0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -938,6 +938,8 @@ struct hclge_dev { u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num; DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c index f478770299c6..d60cc9426f70 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) struct pci_dev *pdev = hdev->pdev; struct hclgevf_devlink_priv *priv; struct devlink *devlink; - int ret; devlink = devlink_alloc(&hclgevf_devlink_ops, @@ -122,20 +121,9 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev) priv->hdev = hdev; hdev->devlink = devlink; - ret = devlink_register(devlink); - if (ret) { - dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", - ret); - goto out_reg_fail; - } - + devlink_register(devlink); devlink_reload_enable(devlink); - return 0; - -out_reg_fail: - devlink_free(devlink); - return ret; } void hclgevf_devlink_uninit(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 6e11ee339f12..60ae8bfc5f69 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int hinic_devlink_register(struct hinic_devlink_priv *priv) +void hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); - return devlink_register(devlink); + devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index 9e315011015c..46760d607b9b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -110,7 +110,7 @@ struct host_image_st { struct devlink *hinic_devlink_alloc(struct device *dev); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct hinic_devlink_priv 
*priv); +void hinic_devlink_register(struct hinic_devlink_priv *priv); void hinic_devlink_unregister(struct hinic_devlink_priv *priv); int hinic_health_reporters_create(struct hinic_devlink_priv *priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 56b6b04e209b..657a15447bd0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } - err = hinic_devlink_register(hwdev->devlink_dev); - if (err) { - dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); - return err; - } - err = hinic_func_to_func_init(hwdev); if (err) { dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); - hinic_devlink_unregister(hwdev->devlink_dev); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); return err; } @@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) } hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); - + hinic_devlink_register(hwdev->devlink_dev); return 0; } @@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + hinic_devlink_unregister(hwdev->devlink_dev); hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); if (!HINIC_IS_VF(hwdev->hwif)) { @@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) hinic_func_to_func_free(hwdev); - hinic_devlink_unregister(hwdev->devlink_dev); - hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index ae707e305684..9965e8d5d0a9 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1379,10 +1379,8 @@ static int hinic_probe(struct pci_dev *pdev, { int err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); - return err; - } + if (err) + return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n"); err = pci_request_regions(pdev, HINIC_DRV_NAME); if (err) { diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 664a91af662d..523c488c71f6 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev) static int emac_init_config(struct emac_instance *dev) { struct device_node *np = dev->ofdev->dev.of_node; - const void *p; int err; /* Read config from device-tree */ @@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev) } /* Read MAC-address */ - p = of_get_property(np, "local-mac-address", NULL); - if (p == NULL) { - printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", - np); - return -ENXIO; + err = of_get_mac_address(np, dev->ndev->dev_addr); + if (err) { + if (err != -EPROBE_DEFER) + dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n"); + return err; } - memcpy(dev->ndev->dev_addr, p, ETH_ALEN); /* IAHT and GAHT filter parameterization */ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) { diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3d9b4f99d357..3aedb680adb8 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -605,17 +605,13 @@ static int ibmveth_open(struct net_device *netdev) } rc = -ENOMEM; - 
adapter->bounce_buffer = - kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL); - if (!adapter->bounce_buffer) - goto out_free_irq; - adapter->bounce_buffer_dma = - dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer, - netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) { - netdev_err(netdev, "unable to map bounce buffer\n"); - goto out_free_bounce_buffer; + adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev, + netdev->mtu + IBMVETH_BUFF_OH, + &adapter->bounce_buffer_dma, GFP_KERNEL); + if (!adapter->bounce_buffer) { + netdev_err(netdev, "unable to alloc bounce buffer\n"); + goto out_free_irq; } netdev_dbg(netdev, "initial replenish cycle\n"); @@ -627,8 +623,6 @@ static int ibmveth_open(struct net_device *netdev) return 0; -out_free_bounce_buffer: - kfree(adapter->bounce_buffer); out_free_irq: free_irq(netdev->irq, netdev); out_free_buffer_pools: @@ -702,10 +696,9 @@ static int ibmveth_close(struct net_device *netdev) ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]); - dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma, - adapter->netdev->mtu + IBMVETH_BUFF_OH, - DMA_BIDIRECTIONAL); - kfree(adapter->bounce_buffer); + dma_free_coherent(&adapter->vdev->dev, + adapter->netdev->mtu + IBMVETH_BUFF_OH, + adapter->bounce_buffer, adapter->bounce_buffer_dma); netdev_dbg(netdev, "close complete\n"); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6aa6ff89a765..21efd76dd427 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter); static int send_query_phys_parms(struct ibmvnic_adapter *adapter); static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *tx_scrq); +static void free_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter, return -ETIMEDOUT; } +/** + * reuse_ltb() - Check if a long term buffer can be reused + * @ltb: The long term buffer to be checked + * @size: The size of the long term buffer. + * + * An LTB can be reused unless its size has changed. + * + * Return: Return true if the LTB can be reused, false otherwise. + */ +static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size) +{ + return (ltb->buff && ltb->size == size); +} + +/** + * alloc_long_term_buff() - Allocate a long term buffer (LTB) + * + * @adapter: ibmvnic adapter associated to the LTB + * @ltb: container object for the LTB + * @size: size of the LTB + * + * Allocate an LTB of the specified size and notify VIOS. + * + * If the given @ltb already has the correct size, reuse it. Otherwise if + * its non-NULL, free it. Then allocate a new one of the correct size. + * Notify the VIOS either way since we may now be working with a new VIOS. + * + * Allocating larger chunks of memory during resets, specially LPM or under + * low memory situations can cause resets to fail/timeout and for LPAR to + * lose connectivity. So hold onto the LTB even if we fail to communicate + * with the VIOS and reuse it on next open. Free LTB when adapter is closed. + * + * Return: 0 if we were able to allocate the LTB and notify the VIOS and + * a negative value otherwise. 
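
The ibmveth hunk above replaces a kmalloc() plus dma_map_single() pair with a single dma_alloc_coherent() call, which allocates the buffer and establishes the device mapping in one step and removes the dma_mapping_error() unwind path. The shape of the change, reduced to a sketch with hypothetical names:

static int alloc_bounce(struct device *dev, size_t size,
			void **buf, dma_addr_t *buf_dma)
{
	*buf = dma_alloc_coherent(dev, size, buf_dma, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	return 0;
}

static void free_bounce(struct device *dev, size_t size,
			void *buf, dma_addr_t buf_dma)
{
	/* one call undoes both the allocation and the mapping */
	dma_free_coherent(dev, size, buf, buf_dma);
}
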
+ */ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { struct device *dev = &adapter->vdev->dev; int rc; - ltb->size = size; - ltb->buff = dma_alloc_coherent(dev, ltb->size, <b->addr, - GFP_KERNEL); + if (!reuse_ltb(ltb, size)) { + dev_dbg(dev, + "LTB size changed from 0x%llx to 0x%x, reallocating\n", + ltb->size, size); + free_long_term_buff(adapter, ltb); + } - if (!ltb->buff) { - dev_err(dev, "Couldn't alloc long term buffer\n"); - return -ENOMEM; + if (ltb->buff) { + dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); + } else { + ltb->buff = dma_alloc_coherent(dev, size, <b->addr, + GFP_KERNEL); + if (!ltb->buff) { + dev_err(dev, "Couldn't alloc long term buffer\n"); + return -ENOMEM; + } + ltb->size = size; + + ltb->map_id = find_first_zero_bit(adapter->map_ids, + MAX_MAP_ID); + bitmap_set(adapter->map_ids, ltb->map_id, 1); + + dev_dbg(dev, + "Allocated new LTB [map %d, size 0x%llx]\n", + ltb->map_id, ltb->size); } - ltb->map_id = adapter->map_id; - adapter->map_id++; + + /* Ensure ltb is zeroed - specially when reusing it. */ + memset(ltb->buff, 0, ltb->size); mutex_lock(&adapter->fw_lock); adapter->fw_done_rc = 0; @@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); if (rc) { - dev_err(dev, - "Long term map request aborted or timed out,rc = %d\n", + dev_err(dev, "LTB map request aborted or timed out, rc = %d\n", rc); goto out; } if (adapter->fw_done_rc) { - dev_err(dev, "Couldn't map long term buffer,rc = %d\n", + dev_err(dev, "Couldn't map LTB, rc = %d\n", adapter->fw_done_rc); rc = -1; goto out; } rc = 0; out: - if (rc) { - dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - ltb->buff = NULL; - } + /* don't free LTB on communication error - see function header */ mutex_unlock(&adapter->fw_lock); return rc; } @@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, adapter->reset_reason != VNIC_RESET_MOBILITY && adapter->reset_reason != VNIC_RESET_TIMEOUT) send_request_unmap(adapter, ltb->map_id); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); + ltb->buff = NULL; + /* mark this map_id free */ + bitmap_clear(adapter->map_ids, ltb->map_id, 1); ltb->map_id = 0; } -static int reset_long_term_buff(struct ibmvnic_adapter *adapter, - struct ibmvnic_long_term_buff *ltb) -{ - struct device *dev = &adapter->vdev->dev; - int rc; - - memset(ltb->buff, 0, ltb->size); - - mutex_lock(&adapter->fw_lock); - adapter->fw_done_rc = 0; - - reinit_completion(&adapter->fw_done); - rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - if (rc) { - mutex_unlock(&adapter->fw_lock); - return rc; - } - - rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000); - if (rc) { - dev_info(dev, - "Reset failed, long term map request timed out or aborted\n"); - mutex_unlock(&adapter->fw_lock); - return rc; - } - - if (adapter->fw_done_rc) { - dev_info(dev, - "Reset failed, attempting to free and reallocate buffer\n"); - free_long_term_buff(adapter, ltb); - mutex_unlock(&adapter->fw_lock); - return alloc_long_term_buff(adapter, ltb, ltb->size); - } - mutex_unlock(&adapter->fw_lock); - return 0; -} - static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) { int i; @@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, * be 0. 
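
alloc_long_term_buff() now draws map IDs from a bitmap instead of a monotonically increasing counter, so IDs released by free_long_term_buff() can be reissued. The core of that idiom, reduced to a sketch (MAX_IDS and the absence of locking are assumptions of the sketch; ibmvnic sizes its bitmap with MAX_MAP_ID and serializes callers elsewhere):

#include <linux/bitmap.h>
#include <linux/errno.h>

#define MAX_IDS 256			/* assumed capacity for the sketch */
static DECLARE_BITMAP(id_map, MAX_IDS);

static int id_alloc(void)
{
	int id = find_first_zero_bit(id_map, MAX_IDS);

	if (id >= MAX_IDS)		/* returns MAX_IDS when full */
		return -ENOSPC;
	bitmap_set(id_map, id, 1);	/* claim exactly one bit */
	return id;
}

static void id_free(int id)
{
	bitmap_clear(id_map, id, 1);
}
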
*/ for (i = ind_bufp->index; i < count; ++i) { - skb = netdev_alloc_skb(adapter->netdev, pool->buff_size); + index = pool->free_map[pool->next_free]; + + /* We maybe reusing the skb from earlier resets. Allocate + * only if necessary. But since the LTB may have changed + * during reset (see init_rx_pools()), update LTB below + * even if reusing skb. + */ + skb = pool->rx_buff[index].skb; if (!skb) { - dev_err(dev, "Couldn't replenish rx buff\n"); - adapter->replenish_no_mem++; - break; + skb = netdev_alloc_skb(adapter->netdev, + pool->buff_size); + if (!skb) { + dev_err(dev, "Couldn't replenish rx buff\n"); + adapter->replenish_no_mem++; + break; + } } - index = pool->free_map[pool->next_free]; - - if (pool->rx_buff[index].skb) - dev_err(dev, "Inconsistent free_map!\n"); + pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + pool->next_free = (pool->next_free + 1) % pool->size; /* Copy the skb to the long term mapped DMA buffer */ offset = index * pool->buff_size; dst = pool->long_term_buff.buff + offset; memset(dst, 0, pool->buff_size); dma_addr = pool->long_term_buff.addr + offset; - pool->rx_buff[index].data = dst; - pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP; + /* add the skb to an rx_buff in the pool */ + pool->rx_buff[index].data = dst; pool->rx_buff[index].dma = dma_addr; pool->rx_buff[index].skb = skb; pool->rx_buff[index].pool_index = pool->index; pool->rx_buff[index].size = pool->buff_size; + /* queue the rx_buff for the next send_subcrq_indirect */ sub_crq = &ind_bufp->indir_arr[ind_bufp->index++]; memset(sub_crq, 0, sizeof(*sub_crq)); sub_crq->rx_add.first = IBMVNIC_CRQ_CMD; @@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter, shift = 8; #endif sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift); - pool->next_free = (pool->next_free + 1) % pool->size; + + /* if send_subcrq_indirect queue is full, flush to VIOS */ if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS || i == count - 1) { lpar_rc = @@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) return 0; } -static int reset_rx_pools(struct ibmvnic_adapter *adapter) -{ - struct ibmvnic_rx_pool *rx_pool; - u64 buff_size; - int rx_scrqs; - int i, j, rc; - - if (!adapter->rx_pool) - return -1; - - buff_size = adapter->cur_rx_buf_sz; - rx_scrqs = adapter->num_active_rx_pools; - for (i = 0; i < rx_scrqs; i++) { - rx_pool = &adapter->rx_pool[i]; - - netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); - - if (rx_pool->buff_size != buff_size) { - free_long_term_buff(adapter, &rx_pool->long_term_buff); - rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rc = alloc_long_term_buff(adapter, - &rx_pool->long_term_buff, - rx_pool->size * - rx_pool->buff_size); - } else { - rc = reset_long_term_buff(adapter, - &rx_pool->long_term_buff); - } - - if (rc) - return rc; - - for (j = 0; j < rx_pool->size; j++) - rx_pool->free_map[j] = j; - - memset(rx_pool->rx_buff, 0, - rx_pool->size * sizeof(struct ibmvnic_rx_buff)); - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; - rx_pool->active = 1; - } - - return 0; -} - +/** + * release_rx_pools() - Release any rx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. 
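
The "safe to call multiple times" guarantee documented here is the usual NULL-after-free discipline: the release helper leaves every pointer it frees set to NULL and its counters zeroed, so a repeat call finds nothing to do. In miniature (struct adap and its fields are hypothetical; kfree() comes from <linux/slab.h>):

struct rx_pool;

struct adap {
	struct rx_pool *pool;
	int num_pools;
};

static void release_pools(struct adap *a)
{
	kfree(a->pool);		/* kfree(NULL) is a no-op */
	a->pool = NULL;
	a->num_pools = 0;	/* nothing left for a repeat call */
}
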
+ */ static void release_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; @@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); kfree(rx_pool->free_map); + free_long_term_buff(adapter, &rx_pool->long_term_buff); if (!rx_pool->rx_buff) @@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->rx_pool); adapter->rx_pool = NULL; adapter->num_active_rx_pools = 0; + adapter->prev_rx_pool_size = 0; } +/** + * reuse_rx_pools() - Check if the existing rx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing rx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and size of each buffer) have not + * changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the rx pools can be reused, false otherwise. + */ +static bool reuse_rx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_buff_size, new_buff_size; + + if (!adapter->rx_pool) + return false; + + old_num_pools = adapter->num_active_rx_pools; + new_num_pools = adapter->req_rx_queues; + + old_pool_size = adapter->prev_rx_pool_size; + new_pool_size = adapter->req_rx_add_entries_per_subcrq; + + old_buff_size = adapter->prev_rx_buf_sz; + new_buff_size = adapter->cur_rx_buf_sz; + + /* Require buff size to be exactly same for now */ + if (old_buff_size != new_buff_size) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_rx_queues || + old_num_pools > adapter->max_rx_queues || + old_pool_size < adapter->min_rx_add_entries_per_subcrq || + old_pool_size > adapter->max_rx_add_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_rx_pools(): Initialize the set of receiver pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of receiver pools in the ibmvnic adapter associated + * with the net_device @netdev. If possible, reuse the existing rx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_rx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->vdev->dev; struct ibmvnic_rx_pool *rx_pool; - int rxadd_subcrqs; + u64 num_pools; + u64 pool_size; /* # of buffers in one pool */ u64 buff_size; int i, j; - rxadd_subcrqs = adapter->num_active_rx_scrqs; + pool_size = adapter->req_rx_add_entries_per_subcrq; + num_pools = adapter->req_rx_queues; buff_size = adapter->cur_rx_buf_sz; - adapter->rx_pool = kcalloc(rxadd_subcrqs, + if (reuse_rx_pools(adapter)) { + dev_dbg(dev, "Reusing rx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_rx_pools(adapter); + + adapter->rx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL); if (!adapter->rx_pool) { @@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev) return -1; } - adapter->num_active_rx_pools = rxadd_subcrqs; + /* Set num_active_rx_pools early. If we fail below after partial + * allocation, release_rx_pools() will know how many to look for. 
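
reuse_rx_pools() compares the previous pool geometry against the newly negotiated one. Its exact-match core condenses to the sketch below (hypothetical names); note the driver is slightly more permissive than this, also allowing reuse when counts or sizes changed but the old values still fall within the device's advertised min/max limits:

static bool can_reuse(u64 old_num, u64 new_num,
		      u64 old_size, u64 new_size,
		      u64 old_buff, u64 new_buff)
{
	/* buffer size must match exactly for now */
	if (old_buff != new_buff)
		return false;
	return old_num == new_num && old_size == new_size;
}
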
+ */ + adapter->num_active_rx_pools = num_pools; - for (i = 0; i < rxadd_subcrqs; i++) { + for (i = 0; i < num_pools; i++) { rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", - i, adapter->req_rx_add_entries_per_subcrq, - buff_size); + i, pool_size, buff_size); - rx_pool->size = adapter->req_rx_add_entries_per_subcrq; + rx_pool->size = pool_size; rx_pool->index = i; rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES); - rx_pool->active = 1; rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int), GFP_KERNEL); if (!rx_pool->free_map) { - release_rx_pools(adapter); - return -1; + dev_err(dev, "Couldn't alloc free_map %d\n", i); + goto out_release; } rx_pool->rx_buff = kcalloc(rx_pool->size, @@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev) GFP_KERNEL); if (!rx_pool->rx_buff) { dev_err(dev, "Couldn't alloc rx buffers\n"); - release_rx_pools(adapter); - return -1; + goto out_release; } - - if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, - rx_pool->size * rx_pool->buff_size)) { - release_rx_pools(adapter); - return -1; - } - - for (j = 0; j < rx_pool->size; ++j) - rx_pool->free_map[j] = j; - - atomic_set(&rx_pool->available, 0); - rx_pool->next_alloc = 0; - rx_pool->next_free = 0; } - return 0; -} - -static int reset_one_tx_pool(struct ibmvnic_adapter *adapter, - struct ibmvnic_tx_pool *tx_pool) -{ - int rc, i; - - rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); - if (rc) - return rc; + adapter->prev_rx_pool_size = pool_size; + adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz; - memset(tx_pool->tx_buff, 0, - tx_pool->num_buffers * - sizeof(struct ibmvnic_tx_buff)); - - for (i = 0; i < tx_pool->num_buffers; i++) - tx_pool->free_map[i] = i; +update_ltb: + for (i = 0; i < num_pools; i++) { + rx_pool = &adapter->rx_pool[i]; + dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n", + i, rx_pool->size, rx_pool->buff_size); - tx_pool->consumer_index = 0; - tx_pool->producer_index = 0; + if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff, + rx_pool->size * rx_pool->buff_size)) + goto out; - return 0; -} + for (j = 0; j < rx_pool->size; ++j) { + struct ibmvnic_rx_buff *rx_buff; -static int reset_tx_pools(struct ibmvnic_adapter *adapter) -{ - int tx_scrqs; - int i, rc; + rx_pool->free_map[j] = j; - if (!adapter->tx_pool) - return -1; + /* NOTE: Don't clear rx_buff->skb here - will leak + * memory! replenish_rx_pool() will reuse skbs or + * allocate as necessary. + */ + rx_buff = &rx_pool->rx_buff[j]; + rx_buff->dma = 0; + rx_buff->data = 0; + rx_buff->size = 0; + rx_buff->pool_index = 0; + } - tx_scrqs = adapter->num_active_tx_pools; - for (i = 0; i < tx_scrqs; i++) { - ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]); - rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); - if (rc) - return rc; - rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]); - if (rc) - return rc; + /* Mark pool "empty" so replenish_rx_pools() will + * update the LTB info for each buffer + */ + atomic_set(&rx_pool->available, 0); + rx_pool->next_alloc = 0; + rx_pool->next_free = 0; + /* replenish_rx_pool() may have called deactivate_rx_pools() + * on failover. Ensure pool is active now. + */ + rx_pool->active = 1; } - return 0; +out_release: + release_rx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. 
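
The two-label error path that closes init_rx_pools() encodes a deliberate policy: pool allocation failures tear everything down, while LTB allocate/map failures keep the pools and any LTBs already mapped so the next open can retry cheaply. A skeleton of that flow, using hypothetical helpers echoing the sketches above:

struct adap;
static bool can_reuse(struct adap *a);
static int alloc_pools(struct adap *a);
static int map_ltbs(struct adap *a);
static void release_pools(struct adap *a);

static int init_pools(struct adap *a)
{
	if (can_reuse(a))
		goto update_ltb;

	release_pools(a);
	if (alloc_pools(a) < 0)
		goto out_release;	/* partial allocation: tear down */

update_ltb:
	if (map_ltbs(a) < 0)
		goto out;		/* hold onto pools for the next open */
	return 0;

out_release:
	release_pools(a);
out:
	return -1;
}
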
+ */ + return -1; } static void release_vpd_data(struct ibmvnic_adapter *adapter) @@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter, free_long_term_buff(adapter, &tx_pool->long_term_buff); } +/** + * release_tx_pools() - Release any tx pools attached to @adapter. + * @adapter: ibmvnic adapter + * + * Safe to call this multiple times - even if no pools are attached. + */ static void release_tx_pools(struct ibmvnic_adapter *adapter) { int i; + /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are + * both NULL or both non-NULL. So we only need to check one. + */ if (!adapter->tx_pool) return; @@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) kfree(adapter->tso_pool); adapter->tso_pool = NULL; adapter->num_active_tx_pools = 0; + adapter->prev_tx_pool_size = 0; } static int init_one_tx_pool(struct net_device *netdev, struct ibmvnic_tx_pool *tx_pool, - int num_entries, int buf_size) + int pool_size, int buf_size) { - struct ibmvnic_adapter *adapter = netdev_priv(netdev); int i; - tx_pool->tx_buff = kcalloc(num_entries, + tx_pool->tx_buff = kcalloc(pool_size, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); if (!tx_pool->tx_buff) return -1; - if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, - num_entries * buf_size)) - return -1; - - tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL); - if (!tx_pool->free_map) + tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL); + if (!tx_pool->free_map) { + kfree(tx_pool->tx_buff); + tx_pool->tx_buff = NULL; return -1; + } - for (i = 0; i < num_entries; i++) + for (i = 0; i < pool_size; i++) tx_pool->free_map[i] = i; tx_pool->consumer_index = 0; tx_pool->producer_index = 0; - tx_pool->num_buffers = num_entries; + tx_pool->num_buffers = pool_size; tx_pool->buf_size = buf_size; return 0; } +/** + * reuse_tx_pools() - Check if the existing tx pools can be reused. + * @adapter: ibmvnic adapter + * + * Check if the existing tx pools in the adapter can be reused. The + * pools can be reused if the pool parameters (number of pools, + * number of buffers in the pool and mtu) have not changed. + * + * NOTE: This assumes that all pools have the same number of buffers + * which is the case currently. If that changes, we must fix this. + * + * Return: true if the tx pools can be reused, false otherwise. + */ +static bool reuse_tx_pools(struct ibmvnic_adapter *adapter) +{ + u64 old_num_pools, new_num_pools; + u64 old_pool_size, new_pool_size; + u64 old_mtu, new_mtu; + + if (!adapter->tx_pool) + return false; + + old_num_pools = adapter->num_active_tx_pools; + new_num_pools = adapter->num_active_tx_scrqs; + old_pool_size = adapter->prev_tx_pool_size; + new_pool_size = adapter->req_tx_entries_per_subcrq; + old_mtu = adapter->prev_mtu; + new_mtu = adapter->req_mtu; + + /* Require MTU to be exactly same to reuse pools for now */ + if (old_mtu != new_mtu) + return false; + + if (old_num_pools == new_num_pools && old_pool_size == new_pool_size) + return true; + + if (old_num_pools < adapter->min_tx_queues || + old_num_pools > adapter->max_tx_queues || + old_pool_size < adapter->min_tx_entries_per_subcrq || + old_pool_size > adapter->max_tx_entries_per_subcrq) + return false; + + return true; +} + +/** + * init_tx_pools(): Initialize the set of transmit pools in the adapter. + * @netdev: net device associated with the vnic interface + * + * Initialize the set of transmit pools in the ibmvnic adapter associated + * with the net_device @netdev. 
If possible, reuse the existing tx pools. + * Otherwise free any existing pools and allocate a new set of pools + * before initializing them. + * + * Return: 0 on success and negative value on error. + */ static int init_tx_pools(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int tx_subcrqs; + struct device *dev = &adapter->vdev->dev; + int num_pools; + u64 pool_size; /* # of buffers in pool */ u64 buff_size; - int i, rc; + int i, j, rc; + + num_pools = adapter->req_tx_queues; - tx_subcrqs = adapter->num_active_tx_scrqs; - adapter->tx_pool = kcalloc(tx_subcrqs, + /* We must notify the VIOS about the LTB on all resets - but we only + * need to alloc/populate pools if either the number of buffers or + * size of each buffer in the pool has changed. + */ + if (reuse_tx_pools(adapter)) { + netdev_dbg(netdev, "Reusing tx pools\n"); + goto update_ltb; + } + + /* Allocate/populate the pools. */ + release_tx_pools(adapter); + + pool_size = adapter->req_tx_entries_per_subcrq; + num_pools = adapter->num_active_tx_scrqs; + + adapter->tx_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); if (!adapter->tx_pool) return -1; - adapter->tso_pool = kcalloc(tx_subcrqs, + adapter->tso_pool = kcalloc(num_pools, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL); + /* To simplify release_tx_pools() ensure that ->tx_pool and + * ->tso_pool are either both NULL or both non-NULL. + */ if (!adapter->tso_pool) { kfree(adapter->tx_pool); adapter->tx_pool = NULL; return -1; } - adapter->num_active_tx_pools = tx_subcrqs; + /* Set num_active_tx_pools early. If we fail below after partial + * allocation, release_tx_pools() will know how many to look for. + */ + adapter->num_active_tx_pools = num_pools; + + buff_size = adapter->req_mtu + VLAN_HLEN; + buff_size = ALIGN(buff_size, L1_CACHE_BYTES); + + for (i = 0; i < num_pools; i++) { + dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n", + i, adapter->req_tx_entries_per_subcrq, buff_size); - for (i = 0; i < tx_subcrqs; i++) { - buff_size = adapter->req_mtu + VLAN_HLEN; - buff_size = ALIGN(buff_size, L1_CACHE_BYTES); rc = init_one_tx_pool(netdev, &adapter->tx_pool[i], - adapter->req_tx_entries_per_subcrq, - buff_size); - if (rc) { - release_tx_pools(adapter); - return rc; - } + pool_size, buff_size); + if (rc) + goto out_release; rc = init_one_tx_pool(netdev, &adapter->tso_pool[i], IBMVNIC_TSO_BUFS, IBMVNIC_TSO_BUF_SZ); - if (rc) { - release_tx_pools(adapter); - return rc; - } + if (rc) + goto out_release; + } + + adapter->prev_tx_pool_size = pool_size; + adapter->prev_mtu = adapter->req_mtu; + +update_ltb: + /* NOTE: All tx_pools have the same number of buffers (which is + * same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS + * buffers (see calls init_one_tx_pool() for these). + * For consistency, we use tx_pool->num_buffers and + * tso_pool->num_buffers below. 
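Stepping back, the overall shape of init_tx_pools() above is a two-phase flow; the sketch below is an editor's outline in which alloc_tx_pools() and map_tx_ltbs() are hypothetical stand-ins for the allocation section and the update_ltb section of the real function:

static int init_tx_pools_outline(struct ibmvnic_adapter *adapter)
{
	/* phase 1: (re)allocate only when geometry or MTU changed */
	if (!reuse_tx_pools(adapter)) {
		release_tx_pools(adapter);
		if (alloc_tx_pools(adapter))	/* hypothetical helper */
			return -1;
	}

	/* phase 2: always runs - the VIOS must be told about the
	 * long term buffers on every reset
	 */
	return map_tx_ltbs(adapter);		/* hypothetical helper */
}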
+ */ + rc = -1; + for (i = 0; i < num_pools; i++) { + struct ibmvnic_tx_pool *tso_pool; + struct ibmvnic_tx_pool *tx_pool; + u32 ltb_size; + + tx_pool = &adapter->tx_pool[i]; + ltb_size = tx_pool->num_buffers * tx_pool->buf_size; + if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n", + i, tx_pool->long_term_buff.buff, + tx_pool->num_buffers, tx_pool->buf_size); + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + + for (j = 0; j < tx_pool->num_buffers; j++) + tx_pool->free_map[j] = j; + + tso_pool = &adapter->tso_pool[i]; + ltb_size = tso_pool->num_buffers * tso_pool->buf_size; + if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff, + ltb_size)) + goto out; + + dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n", + i, tso_pool->long_term_buff.buff, + tso_pool->num_buffers, tso_pool->buf_size); + + tso_pool->consumer_index = 0; + tso_pool->producer_index = 0; + + for (j = 0; j < tso_pool->num_buffers; j++) + tso_pool->free_map[j] = j; } return 0; +out_release: + release_tx_pools(adapter); +out: + /* We failed to allocate one or more LTBs or map them on the VIOS. + * Hold onto the pools and any LTBs that we did allocate/map. + */ + return rc; } static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) @@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter) { release_vpd_data(adapter); - release_tx_pools(adapter); - release_rx_pools(adapter); - release_napi(adapter); release_login_buffer(adapter); release_login_rsp_buffer(adapter); @@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter) return rc; } - adapter->map_id = 1; - rc = init_napi(adapter); if (rc) return rc; @@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev) if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); goto out; } } @@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev) ibmvnic_napi_disable(adapter); ibmvnic_disable_irqs(adapter); - - clean_rx_pools(adapter); - clean_tx_pools(adapter); } static int __ibmvnic_close(struct net_device *netdev) @@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev) rc = __ibmvnic_close(netdev); ibmvnic_cleanup(netdev); + clean_rx_pools(adapter); + clean_tx_pools(adapter); return rc; } @@ -2036,9 +2226,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason) static int do_reset(struct ibmvnic_adapter *adapter, struct ibmvnic_rwi *rwi, u32 reset_state) { + struct net_device *netdev = adapter->netdev; u64 old_num_rx_queues, old_num_tx_queues; u64 old_num_rx_slots, old_num_tx_slots; - struct net_device *netdev = adapter->netdev; int rc; netdev_dbg(adapter->netdev, @@ -2188,8 +2378,6 @@ static int do_reset(struct ibmvnic_adapter *adapter, !adapter->rx_pool || !adapter->tso_pool || !adapter->tx_pool) { - release_rx_pools(adapter); - release_tx_pools(adapter); release_napi(adapter); release_vpd_data(adapter); @@ -2198,16 +2386,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, goto out; } else { - rc = reset_tx_pools(adapter); + rc = init_tx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", + netdev_dbg(netdev, + "init tx pools failed (%d)\n", rc); goto out; } - rc = reset_rx_pools(adapter); + rc = init_rx_pools(netdev); if (rc) { - netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", + 
netdev_dbg(netdev, + "init rx pools failed (%d)\n", rc); goto out; } @@ -4778,9 +4968,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq, dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc); return; } - netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n", - crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages, - crq->query_map_rsp.free_pages); + netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n", + crq->query_map_rsp.page_size, + __be32_to_cpu(crq->query_map_rsp.tot_pages), + __be32_to_cpu(crq->query_map_rsp.free_pages)); } static void handle_query_cap_rsp(union ibmvnic_crq *crq, @@ -5527,6 +5718,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->vdev = dev; adapter->netdev = netdev; adapter->login_pending = false; + memset(&adapter->map_ids, 0, sizeof(adapter->map_ids)); + /* map_ids start at 1, so ensure map_id 0 is always "in-use" */ + bitmap_set(adapter->map_ids, 0, 1); ether_addr_copy(adapter->mac_addr, mac_addr_p); ether_addr_copy(netdev->dev_addr, adapter->mac_addr); @@ -5547,6 +5741,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->reset_done); init_completion(&adapter->stats_done); clear_bit(0, &adapter->resetting); + adapter->prev_rx_buf_sz = 0; + adapter->prev_mtu = 0; init_success = false; do { @@ -5647,6 +5843,8 @@ static void ibmvnic_remove(struct vio_dev *dev) unregister_netdevice(netdev); release_resources(adapter); + release_rx_pools(adapter); + release_tx_pools(adapter); release_sub_crqs(adapter, 1); release_crq_queue(adapter); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 22df602323bc..b8e42f67d897 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -827,7 +827,7 @@ struct ibmvnic_rx_buff { struct ibmvnic_rx_pool { struct ibmvnic_rx_buff *rx_buff; - int size; + int size; /* # of buffers in the pool */ int index; int buff_size; atomic_t available; @@ -967,6 +967,7 @@ struct ibmvnic_adapter { u64 min_mtu; u64 max_mtu; u64 req_mtu; + u64 prev_mtu; u64 max_multicast_filters; u64 vlan_header_insertion; u64 rx_vlan_header_insertion; @@ -979,13 +980,18 @@ struct ibmvnic_adapter { u64 opt_tx_entries_per_subcrq; u64 opt_rxba_entries_per_subcrq; __be64 tx_rx_desc_req; - u8 map_id; +#define MAX_MAP_ID 255 + DECLARE_BITMAP(map_ids, MAX_MAP_ID); u32 num_active_rx_scrqs; u32 num_active_rx_pools; u32 num_active_rx_napi; u32 num_active_tx_scrqs; u32 num_active_tx_pools; + + u32 prev_rx_pool_size; + u32 prev_tx_pool_size; u32 cur_rx_buf_sz; + u32 prev_rx_buf_sz; struct tasklet_struct tasklet; enum vnic_state state; diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 5b2143f4b1f8..f3424255bd2b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -21,6 +21,7 @@ #include <linux/ptp_classify.h> #include <linux/mii.h> #include <linux/mdio.h> +#include <linux/mutex.h> #include <linux/pm_qos.h> #include "hw.h" diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 3c4f08d20414..83413772b00c 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -158,6 +158,11 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) +enum ice_feature { + ICE_F_DSCP, + ICE_F_MAX +}; + struct ice_txq_meta { u32 q_teid; /* Tx-scheduler element identifier */ u16 q_id; /* Entry in VSI's txq_map bitmap */ @@ 
-443,6 +448,7 @@ struct ice_pf { /* used to ratelimit the MDD event logging */ unsigned long last_printed_mdd_jiffies; DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT); + DECLARE_BITMAP(features, ICE_F_MAX); DECLARE_BITMAP(state, ICE_STATE_NBITS); DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS); unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 21b4c7cd6f05..d407cf8c829d 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -671,6 +671,16 @@ struct ice_aqc_sw_rules_elem { } __packed pdata; }; +/* Query PFC Mode (direct 0x0302) + * Set PFC Mode (direct 0x0303) + */ +struct ice_aqc_set_query_pfc_mode { + u8 pfc_mode; +/* For Query Command response, reserved in all other cases */ +#define ICE_AQC_PFC_VLAN_BASED_PFC 1 +#define ICE_AQC_PFC_DSCP_BASED_PFC 2 + u8 rsvd[15]; +}; /* Get Default Topology (indirect 0x0400) */ struct ice_aqc_get_topo { u8 port_num; @@ -1936,6 +1946,7 @@ struct ice_aq_desc { struct ice_aqc_nvm_pkg_data pkg_data; struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl; struct ice_aqc_pf_vf_msg virt; + struct ice_aqc_set_query_pfc_mode set_query_pfc_mode; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; struct ice_aqc_lldp_stop lldp_stop; @@ -2040,6 +2051,10 @@ enum ice_adminq_opc { ice_aqc_opc_clear_pf_cfg = 0x02A4, + /* DCB commands */ + ice_aqc_opc_query_pfc_mode = 0x0302, + ice_aqc_opc_set_pfc_mode = 0x0303, + /* transmit scheduler commands */ ice_aqc_opc_get_dflt_topo = 0x0400, ice_aqc_opc_add_sched_elems = 0x0401, diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 88d98c9e5f91..3071b8e79499 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi) if (!vsi || vsi->type != ICE_VSI_PF) return; - arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, + arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), GFP_KERNEL); if (!arfs_fltr_list) return; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 849fcf605479..241427cd9bc0 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include "ice_common.h" +#include "ice_lib.h" #include "ice_sched.h" #include "ice_dcb.h" @@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw, } /** + * ice_aq_set_pfc_mode - Set PFC mode + * @hw: pointer to the HW struct + * @pfc_mode: value of PFC mode to set + * @cd: pointer to command details structure or NULL + * + * This AQ call configures the PFC mode to DSCP-based PFC mode or + * VLAN-based PFC (0x0303) + */ +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd) +{ + struct ice_aqc_set_query_pfc_mode *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC) + return -EINVAL; + + cmd = &desc.params.set_query_pfc_mode; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode); + + cmd->pfc_mode = pfc_mode; + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (status) + return ice_status_to_errno(status); + + /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is + * disabled, FW will write back 0 to cmd->pfc_mode. 
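Because of this write-back behavior, a caller can treat a mode mismatch as firmware refusing the request; a minimal caller sketch (editor's illustration, reusing the error conventions of this function):

static int ice_try_dscp_pfc(struct ice_pf *pf)
{
	int err;

	err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, NULL);
	if (err == -EOPNOTSUPP)
		/* FW did not echo the requested mode back, e.g. DCB off */
		dev_dbg(ice_pf_to_dev(pf), "FW kept the previous PFC mode\n");
	return err;
}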
After the AQ has + * been executed, check if cmd->pfc_mode is what was requested. If not, + * return an error. + */ + if (cmd->pfc_mode != pfc_mode) + return -EOPNOTSUPP; + + return 0; +} + +/** * ice_cee_to_dcb_cfg * @cee_cfg: pointer to CEE configuration struct * @pi: port information structure @@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, } /** - * ice_add_dcb_tlv - Add all IEEE TLVs + * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of data to convert to TLV + */ +static void +ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_UP_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_DSCP2UP); + tlv->ouisubtype = htonl(ouisubtype); + + /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ + for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { + /* IPv4 mapping */ + buf[i] = dcbcfg->dscp_map[i]; + /* IPv6 mapping */ + buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i]; + } + + /* byte 64 - IPv4 untagged traffic */ + buf[i] = 0; + + /* byte 144 - IPv6 untagged traffic */ + buf[i + ICE_DSCP_IPV6_OFFSET] = 0; +} + +#define ICE_BYTES_PER_TC 8 +/** + * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV + * @tlv: location to build the TLV data + */ +static void +ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_ENF_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_ENFORCE); + tlv->ouisubtype = htonl(ouisubtype); + + /* Allow all DSCP values to be valid for all TCs (IPv4 and IPv6) */ + memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC)); +} + +/** + * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV + * @tlv: location to build the TLV data + * @dcbcfg: location of the data to convert to TLV + */ +static void +ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv, + struct ice_dcbx_cfg *dcbcfg) +{ + struct ice_dcb_ets_cfg *etscfg; + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u8 offset = 0; + u16 typelen; + int i; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_DSCP_TC_BW_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_TCBW); + tlv->ouisubtype = htonl(ouisubtype); + + /* First Octet after subtype + * ---------------------------- + * | RSV | CBS | RSV | Max TCs | + * | 1b | 1b | 3b | 3b | + * ---------------------------- + */ + etscfg = &dcbcfg->etscfg; + buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; + + /* bytes 1 - 4 reserved */ + offset = 5; + + /* TC BW table + * bytes 0 - 7 for TC 0 - 7 + * + * TSA Assignment table + * bytes 8 - 15 for TC 0 - 7 + */ + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + buf[offset] = etscfg->tcbwtable[i]; + buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i]; + offset++; + } +} + +/** + * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV + * @tlv: Fill PFC TLV in IEEE format + * @dcbcfg: Local store which holds the PFC CFG data + */ +static void +ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) +{ + u8 *buf = tlv->tlvinfo; + u32 ouisubtype; + u16 typelen; + + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 
ICE_DSCP_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_DSCP_SUBTYPE_PFC); + tlv->ouisubtype = htonl(ouisubtype); + + buf[0] = dcbcfg->pfc.pfccap & 0xF; + buf[1] = dcbcfg->pfc.pfcena & 0xF; +} + +/** + * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs * @tlv: Fill TLV data in IEEE format * @dcbcfg: Local store which holds the DCB Config * @tlvid: Type of IEEE TLV @@ -1218,21 +1391,41 @@ static void ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, u16 tlvid) { - switch (tlvid) { - case ICE_IEEE_TLV_ID_ETS_CFG: - ice_add_ieee_ets_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_ETS_REC: - ice_add_ieee_etsrec_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_PFC_CFG: - ice_add_ieee_pfc_tlv(tlv, dcbcfg); - break; - case ICE_IEEE_TLV_ID_APP_PRI: - ice_add_ieee_app_pri_tlv(tlv, dcbcfg); - break; - default: - break; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) { + switch (tlvid) { + case ICE_IEEE_TLV_ID_ETS_CFG: + ice_add_ieee_ets_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_ETS_REC: + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_PFC_CFG: + ice_add_ieee_pfc_tlv(tlv, dcbcfg); + break; + case ICE_IEEE_TLV_ID_APP_PRI: + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); + break; + default: + break; + } + } else { + /* pfc_mode == ICE_QOS_MODE_DSCP */ + switch (tlvid) { + case ICE_TLV_ID_DSCP_UP: + ice_add_dscp_up_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_ENF: + ice_add_dscp_enf_tlv(tlv); + break; + case ICE_TLV_ID_DSCP_TC_BW: + ice_add_dscp_tc_bw_tlv(tlv, dcbcfg); + break; + case ICE_TLV_ID_DSCP_TO_PFC: + ice_add_dscp_pfc_tlv(tlv, dcbcfg); + break; + default: + break; + } } } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index d7e5e6178a21..9b6f87a889a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -22,6 +22,14 @@ #define ICE_CEE_DCBX_OUI 0x001B21 #define ICE_CEE_DCBX_TYPE 2 + +#define ICE_DSCP_OUI 0xFFFFFF +#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41 +#define ICE_DSCP_SUBTYPE_ENFORCE 0x42 +#define ICE_DSCP_SUBTYPE_TCBW 0x43 +#define ICE_DSCP_SUBTYPE_PFC 0x44 +#define ICE_DSCP_IPV6_OFFSET 80 + #define ICE_CEE_SUBTYPE_PG_CFG 2 #define ICE_CEE_SUBTYPE_PFC_CFG 3 #define ICE_CEE_SUBTYPE_APP_PRI 4 @@ -78,11 +86,20 @@ #define ICE_IEEE_TLV_ID_APP_PRI 6 #define ICE_TLV_ID_END_OF_LLDPPDU 7 #define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG +#define ICE_TLV_ID_DSCP_UP 3 +#define ICE_TLV_ID_DSCP_ENF 4 +#define ICE_TLV_ID_DSCP_TC_BW 5 +#define ICE_TLV_ID_DSCP_TO_PFC 6 #define ICE_IEEE_ETS_TLV_LEN 25 #define ICE_IEEE_PFC_TLV_LEN 6 #define ICE_IEEE_APP_TLV_LEN 11 +#define ICE_DSCP_UP_TLV_LEN 148 +#define ICE_DSCP_ENF_TLV_LEN 132 +#define ICE_DSCP_TC_BW_TLV_LEN 25 +#define ICE_DSCP_PFC_TLV_LEN 6 + /* IEEE 802.1AB LLDP Organization specific TLV */ struct ice_lldp_org_tlv { __be16 typelen; @@ -120,6 +137,7 @@ struct ice_cee_app_prio { u8 prio_map; } __packed; +int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd); enum ice_status ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, struct ice_dcbx_cfg *dcbcfg); diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 926cf748c5ec..26b4d5f579e6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -544,7 +544,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) * @ets_willing: configure ETS 
willing * @locked: was this function called with RTNL held */ -static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked) { struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *dcbcfg; @@ -726,6 +726,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) /* FW LLDP is disabled, activate SW DCBX/LLDP mode */ dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n"); clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (err) + dev_info(dev, "Failed to set VLAN PFC mode\n"); + err = ice_dcb_sw_dflt_cfg(pf, true, locked); if (err) { dev_err(dev, "Failed to set local DCB config %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 261b6e2ed7bc..3dcde1750a5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -15,6 +15,7 @@ #define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */ void ice_dcb_rebuild(struct ice_pf *pf); +int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked); u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi); @@ -59,6 +60,12 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf) return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || test_bit(ICE_FLAG_DCB_ENA, pf->flags)); } + +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode; +} + #else static inline void ice_dcb_rebuild(struct ice_pf *pf) { } @@ -113,6 +120,11 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, return false; } +static inline u8 ice_get_pfc_mode(struct ice_pf *pf) +{ + return 0; +} + static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { } static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { } static inline void ice_update_dcb_stats(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 4180f1f35fb8..7fdeb411b6df 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcbx_cfg *new_cfg; int bwcfg = 0, bwrec = 0; - int err, i, max_tc = 0; + int err, i; if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) @@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i]; bwcfg += ets->tc_tx_bw[i]; new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i]; - new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; + if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + /* in DSCP mode up->tc mapping cannot change */ + new_cfg->etscfg.prio_table[i] = ets->prio_tc[i]; + new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; + } new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i]; bwrec += ets->tc_reco_bw[i]; new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i]; - new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i]; } if (ice_dcb_bwchk(pf, new_cfg)) { @@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets) goto ets_out; } - max_tc = 
pf->hw.func_caps.common_cap.maxtc; - - new_cfg->etscfg.maxtcs = max_tc; - - if (!bwcfg) - new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc; if (!bwrec) new_cfg->etsrec.tcbwtable[0] = 100; @@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode) pf->dcbx_cap = mode; qos_cfg = &pf->hw.port_info->qos_cfg; - if (mode & DCB_CAP_DCBX_VER_CEE) + if (mode & DCB_CAP_DCBX_VER_CEE) { + if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP) + return ICE_DCB_NO_HW_CHG; qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE; - else + } else { qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE; + } dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode); return ICE_DCB_HW_CHG_RST; @@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg, return false; } +#define ICE_BYTES_PER_DSCP_VAL 8 + /** * ice_dcbnl_setapp - set local IEEE App config * @netdev: relevant netdev struct @@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) struct ice_pf *pf = ice_netdev_to_pf(netdev); struct ice_dcb_app_priority_table new_app; struct ice_dcbx_cfg *old_cfg, *new_cfg; + u8 max_tc; int ret; - if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) || - !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + /* ONLY DSCP APP TLVs have operational significance */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) return -EINVAL; - mutex_lock(&pf->tc_mutex); + /* only allow APP TLVs in SW Mode */ + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n"); + return -EINVAL; + } - new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; - old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (!ice_is_feature_supported(pf, ICE_F_DSCP)) + return -EOPNOTSUPP; - if (old_cfg->numapps == ICE_DCBX_MAX_APPS) { - ret = -EINVAL; - goto setapp_out; + if (app->protocol >= ICE_DSCP_NUM_VAL) { + netdev_err(netdev, "DSCP value 0x%04X out of range\n", + app->protocol); + return -EINVAL; + } + + max_tc = pf->hw.func_caps.common_cap.maxtc; + if (app->priority >= max_tc) { + netdev_err(netdev, "TC %d out of range, max TC %d\n", + app->priority, max_tc); + return -EINVAL; } + /* grab TC mutex */ + mutex_lock(&pf->tc_mutex); + + new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; + old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + ret = dcb_ieee_setapp(netdev, app); if (ret) goto setapp_out; + if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) { + netdev_err(netdev, "DSCP value 0x%04X already user mapped\n", + app->protocol); + ret = dcb_ieee_delapp(netdev, app); + if (ret) + netdev_err(netdev, "Failed to delete re-mapping TLV\n"); + ret = -EINVAL; + goto setapp_out; + } + new_app.selector = app->selector; new_app.prot_id = app->protocol; new_app.priority = app->priority; - if (ice_dcbnl_find_app(old_cfg, &new_app)) { - ret = 0; - goto setapp_out; - } + /* If port is not in DSCP mode, need to set */ + if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) { + int i, j; + + /* set DSCP mode */ + ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC, + NULL); + if (ret) { + netdev_err(netdev, "Failed to set DSCP PFC mode %d\n", + ret); + goto setapp_out; + } + netdev_info(netdev, "Switched QoS to L3 DSCP mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_DSCP; + + /* set default DSCP QoS values */ + new_cfg->etscfg.willing = 0; + new_cfg->pfc.pfccap = max_tc; + new_cfg->pfc.willing = 0; + + for (i = 0; i < 
max_tc; i++) + for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) { + int dscp, offset; + + dscp = (i * max_tc) + j; + offset = max_tc * ICE_BYTES_PER_DSCP_VAL; + + new_cfg->dscp_map[dscp] = i; + /* if less than 8 TCs supported */ + if (max_tc < ICE_MAX_TRAFFIC_CLASS) + new_cfg->dscp_map[dscp + offset] = i; + } + + new_cfg->etscfg.tcbwtable[0] = 100; + new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[0] = 0; + + for (i = 1; i < max_tc; i++) { + new_cfg->etscfg.tcbwtable[i] = 0; + new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS; + new_cfg->etscfg.prio_table[i] = i; + } + } /* end of switching to DSCP mode */ + + /* apply new mapping for this DSCP value */ + new_cfg->dscp_map[app->protocol] = app->priority; new_cfg->app[new_cfg->numapps++] = new_app; + ret = ice_pf_dcb_cfg(pf, new_cfg, true); /* return of zero indicates new cfg applied */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); - if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + else + ret = ICE_DCB_NO_HW_CHG; setapp_out: mutex_unlock(&pf->tc_mutex); @@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) unsigned int i, j; int ret = 0; - if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) + if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) { + netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n"); return -EINVAL; + } mutex_lock(&pf->tc_mutex); old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - if (old_cfg->numapps <= 1) - goto delapp_out; - ret = dcb_ieee_delapp(netdev, app); if (ret) goto delapp_out; new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg; - for (i = 1; i < new_cfg->numapps; i++) { + for (i = 0; i < new_cfg->numapps; i++) { if (app->selector == new_cfg->app[i].selector && app->protocol == new_cfg->app[i].prot_id && app->priority == new_cfg->app[i].priority) { @@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) new_cfg->numapps--; for (j = i; j < new_cfg->numapps; j++) { - new_cfg->app[i].selector = old_cfg->app[j + 1].selector; - new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id; - new_cfg->app[i].priority = old_cfg->app[j + 1].priority; + new_cfg->app[j].selector = old_cfg->app[j + 1].selector; + new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id; + new_cfg->app[j].priority = old_cfg->app[j + 1].priority; } - ret = ice_pf_dcb_cfg(pf, new_cfg, true); - /* return of zero indicates new cfg applied */ + /* if not a DSCP APP TLV or DSCP is not supported, we are done */ + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || + !ice_is_feature_supported(pf, ICE_F_DSCP)) { + ret = ICE_DCB_HW_CHG; + goto delapp_out; + } + + /* if DSCP TLV, then need to address change in mapping */ + clear_bit(app->protocol, new_cfg->dscp_mapped); + /* remap this DSCP value to default value */ + new_cfg->dscp_map[app->protocol] = app->protocol % + ICE_BYTES_PER_DSCP_VAL; + + /* if the last DSCP mapping just got deleted, need to switch + * to L2 VLAN QoS mode + */ + if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && + new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { + ret = ice_aq_set_pfc_mode(&pf->hw, + ICE_AQC_PFC_VLAN_BASED_PFC, + NULL); + if (ret) { + netdev_info(netdev, "Failed to set VLAN PFC mode %d\n", + ret); + goto delapp_out; + } + netdev_info(netdev, "Switched QoS to L2 VLAN mode\n"); + + new_cfg->pfc_mode = ICE_QOS_MODE_VLAN; + + ret = ice_dcb_sw_dflt_cfg(pf, true, true); + } else { + ret = ice_pf_dcb_cfg(pf, new_cfg, true); + } + 
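As a worked example of the default map built above: with max_tc == 8, dscp = (i * max_tc) + j walks DSCP 0-7 into TC 0, 8-15 into TC 1, and so on up to 56-63 into TC 7, and the max_tc < ICE_MAX_TRAFFIC_CLASS branch never runs. A quick self-check of that case (editor's sketch):

static void check_default_dscp_map_8tc(const u8 *dscp_map)
{
	int dscp;

	/* with the full eight TCs the default mapping is dscp / 8 */
	for (dscp = 0; dscp < ICE_DSCP_NUM_VAL; dscp++)
		WARN_ON(dscp_map[dscp] != dscp / 8);
}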
+ /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied + * and reset needs to be performed + */ if (ret == ICE_DCB_HW_CHG_RST) ice_dcbnl_devreset(netdev); + + /* if the change was not significant enough to actually call + * the reconfiguration flow, we still need to tell the caller that + * their request was successfully handled + */ if (ret == ICE_DCB_NO_HW_CHG) - ret = ICE_DCB_HW_CHG_RST; + ret = ICE_DCB_HW_CHG; delapp_out: mutex_unlock(&pf->tc_mutex); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 14afce82ef63..cae1cd97a1ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -22,7 +22,7 @@ struct ice_info_ctx { * * If a version does not exist, for example when attempting to get the * inactive version of flash when there is no pending update, the function - * should leave the buffer in the ctx structure empty and return 0. + * should leave the buffer in the ctx structure empty. */ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) @@ -35,7 +35,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx) snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn); } -static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; enum ice_status status; @@ -45,148 +45,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) /* We failed to locate the PBA, so just skip this entry */ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", ice_stat_str(status)); - - return 0; } -static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver, - hw->fw_patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch); } -static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", + hw->api_maj_ver, hw->api_min_ver); } -static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build); - - return 0; } -static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &pf->hw.flash.orom; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", + orom->major, orom->build, orom->patch); } -static int -ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_orom_info *orom = &ctx->pending_orom; if (ctx->dev_caps.common_cap.nvm_update_pending_orom) snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch); - - return 0; } -static int ice_info_nvm_ver(struct ice_pf *pf, struct 
ice_info_ctx *ctx) +static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; } -static int -ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", + nvm->major, nvm->minor); } -static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &pf->hw.flash.nvm; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 0; } -static int -ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_nvm_info *nvm = &ctx->pending_nvm; if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack); - - return 0; } -static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_hw *hw = &pf->hw; snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name); - - return 0; } -static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver; - snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update, - pkg->draft); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", + pkg->major, pkg->minor, pkg->update, pkg->draft); } -static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void +ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx) { snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id); - - return 0; } -static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; /* The netlist version fields are BCD formatted */ - snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", + netlist->major, netlist->minor, + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) +static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &pf->hw.flash.netlist; snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } -static int -ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; @@ -194,21 +173,18 @@ 
ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor, - netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev, - netlist->cust_ver); - - return 0; + netlist->type >> 16, netlist->type & 0xFFFF, + netlist->rev, netlist->cust_ver); } -static int -ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx) +static void +ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, + struct ice_info_ctx *ctx) { struct ice_netlist_info *netlist = &ctx->pending_netlist; if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash); - - return 0; } #define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL } @@ -238,8 +214,8 @@ enum ice_version_type { static const struct ice_devlink_version { enum ice_version_type type; const char *key; - int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); - int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx); + void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx); } ice_devlink_versions[] = { fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba), running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt), @@ -351,24 +327,15 @@ static int ice_devlink_info_get(struct devlink *devlink, memset(ctx->buf, 0, sizeof(ctx->buf)); - err = ice_devlink_versions[i].getter(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } + ice_devlink_versions[i].getter(pf, ctx); /* If the default getter doesn't report a version, use the * fallback function. This is primarily useful in the case of * "stored" versions that want to report the same value as the * running version in the normal case of no pending update. */ - if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) { - err = ice_devlink_versions[i].fallback(pf, ctx); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info"); - goto out_free_ctx; - } - } + if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) + ice_devlink_versions[i].fallback(pf, ctx); /* Do not report missing versions */ if (ctx->buf[0] == '\0') @@ -498,19 +465,11 @@ struct ice_pf *ice_allocate_pf(struct device *dev) * * Return: zero on success or an error code on failure. 
*/ -int ice_devlink_register(struct ice_pf *pf) +void ice_devlink_register(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct device *dev = ice_pf_to_dev(pf); - int err; - - err = devlink_register(devlink); - if (err) { - dev_err(dev, "devlink registration failed: %d\n", err); - return err; - } - return 0; + devlink_register(devlink); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h index e07e74426bde..e721d7b0d627 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/ice_devlink.h @@ -6,7 +6,7 @@ struct ice_pf *ice_allocate_pf(struct device *dev); -int ice_devlink_register(struct ice_pf *pf); +void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); int ice_devlink_create_port(struct ice_vsi *vsi); void ice_devlink_destroy_port(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c451cf401e63..6f0a29be3ee5 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1215,6 +1215,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) enum ice_status status; bool dcbx_agent_status; + if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) { + clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags); + dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n"); + ret = -EOPNOTSUPP; + goto ethtool_exit; + } + /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. */ diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 59ef68f072c0..cbd8424631e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl); ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac); if (frag) - loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF; + loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF; break; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index d2d40e18ae8a..da4163856f4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -48,7 +48,7 @@ * requests that the packet not be fragmented. MF indicates that a packet has * been fragmented. 
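To make the flag values concrete (editor's note): in the byte at IPv4-header offset 6, bit 0x40 is DF and bit 0x20 is MF, and since the training packet starts with a 14-byte Ethernet header, the loc[20] write in ice_fdir_get_gen_prgm_pkt() above lands exactly on that byte. The rename below therefore makes the macro name match the 0x20 (MF) value it always had; the define here is only an illustration, not part of the patch:

#define FDIR_IPV4_FLAGS_BYTE_OFF (ETH_HLEN + 6)	/* = 20, the loc[20] above */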
*/ -#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20 +#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index dde9802c6c72..3adbd9a179a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3573,3 +3573,50 @@ int ice_set_link(struct ice_vsi *vsi, bool ena) return 0; } + +/** + * ice_is_feature_supported + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to be checked + * + * returns true if the feature is supported, false otherwise + */ +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return false; + + return test_bit(f, pf->features); +} + +/** + * ice_set_feature_support + * @pf: pointer to the struct ice_pf instance + * @f: feature enum to set + */ +static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f) +{ + if (f < 0 || f >= ICE_F_MAX) + return; + + set_bit(f, pf->features); +} + +/** + * ice_init_feature_support + * @pf: pointer to the struct ice_pf instance + * + * called during init to set up the supported features + */ +void ice_init_feature_support(struct ice_pf *pf) +{ + switch (pf->hw.device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + ice_set_feature_support(pf, ICE_F_DSCP); + break; + default: + break; + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index d5a28bf0fc2c..4512c8513178 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -116,4 +116,6 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi); int ice_clear_dflt_vsi(struct ice_sw *sw); +bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); +void ice_init_feature_support(struct ice_pf *pf); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 0d6c143f6653..909e5cd98054 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -624,7 +624,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n"); break; case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA: - netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags)) + netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n"); + else + netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); break; default: break; @@ -1965,7 +1968,8 @@ static int ice_configure_phy(struct ice_vsi *vsi) ice_print_topo_conflict(vsi); - if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) && + phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) return -EPERM; if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) @@ -4258,12 +4262,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); - err = ice_devlink_register(pf); - if (err) { - dev_err(dev, "ice_devlink_register failed: %d\n", err); - goto err_exit_unroll; - } - #ifndef CONFIG_DYNAMIC_DEBUG if (debug < -1) hw->debug_mask = debug; @@ -4276,6 +4274,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_exit_unroll; } + ice_init_feature_support(pf); + ice_request_fw(pf); /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be @@ -4497,6 +4497,7 @@ probe_done: dev_warn(dev, "RDMA is not supported on this device\n"); } + ice_devlink_register(pf); return 0; err_init_aux_unroll: @@ -4520,7 +4521,6 @@ err_init_pf_unroll: ice_devlink_destroy_regions(pf); ice_deinit_hw(hw); err_exit_unroll: - ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return err; @@ -4597,9 +4597,7 @@ static void ice_remove(struct pci_dev *pdev) struct ice_pf *pf = pci_get_drvdata(pdev); int i; - if (!pf) - return; - + ice_devlink_unregister(pf); for (i = 0; i < ICE_MAX_RESET_WAIT; i++) { if (!ice_is_reset_in_progress(pf->state)) break; @@ -4636,7 +4634,6 @@ static void ice_remove(struct pci_dev *pdev) ice_deinit_pf(pf); ice_devlink_destroy_regions(pf); ice_deinit_hw(&pf->hw); - ice_devlink_unregister(pf); /* Issue a PFR as part of the prescribed driver unload flow. 
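The probe/remove reordering above follows the usual register-last, unregister-first rule, so userspace never sees a devlink instance backed by half-initialized state. Reduced to a skeleton (editor's outline, function names illustrative):

static int ice_probe_outline(struct ice_pf *pf)
{
	/* ... set up hw, queues, aux devices, netdev ... */
	ice_devlink_register(pf);	/* publish only when fully ready */
	return 0;
}

static void ice_remove_outline(struct ice_pf *pf)
{
	ice_devlink_unregister(pf);	/* unpublish before any teardown */
	/* ... tear everything down in reverse order ... */
}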
Do not * do it via ice_schedule_reset() since there is no need to rebuild @@ -7239,6 +7236,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_open = ice_open, .ndo_stop = ice_stop, .ndo_start_xmit = ice_start_xmit, + .ndo_select_queue = ice_select_queue, .ndo_features_check = ice_features_check, .ndo_set_rx_mode = ice_set_rx_mode, .ndo_set_mac_address = ice_set_mac_address, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6ee8e0032d52..13b2bdc25b0d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -6,6 +6,7 @@ #include <linux/prefetch.h> #include <linux/mm.h> #include <linux/bpf_trace.h> +#include <net/dsfield.h> #include <net/xdp.h> #include "ice_txrx_lib.h" #include "ice_lib.h" @@ -2296,6 +2297,39 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev) } /** + * ice_get_dscp_up - return the UP/TC value for a SKB + * @dcbcfg: DCB config that contains DSCP to UP/TC mapping + * @skb: SKB to query for info to determine UP/TC + * + * This function should only be called when the PF is in L3 DSCP PFC mode + */ +static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb) +{ + u8 dscp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return dcbcfg->dscp_map[dscp]; +} + +u16 +ice_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct ice_dcbx_cfg *dcbcfg; + + dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP) + skb->priority = ice_get_dscp_up(dcbcfg, skb); + + return netdev_pick_tx(netdev, skb, sb_dev); +} + +/** * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue * @tx_ring: tx_ring to clean */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 1e46e80f3d6f..b0bbbec4e3a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -378,6 +378,9 @@ union ice_32b_rx_flex_desc; bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count); netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev); +u16 +ice_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); void ice_clean_tx_ring(struct ice_ring *tx_ring); void ice_clean_rx_ring(struct ice_ring *rx_ring); int ice_setup_tx_ring(struct ice_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index d33d1906103c..6705f56be020 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -604,7 +604,8 @@ struct ice_dcb_app_priority_table { }; #define ICE_MAX_USER_PRIORITY 8 -#define ICE_DCBX_MAX_APPS 32 +#define ICE_DCBX_MAX_APPS 64 +#define ICE_DSCP_NUM_VAL 64 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 @@ -622,7 +623,14 @@ struct ice_dcbx_cfg { struct ice_dcb_ets_cfg etscfg; struct ice_dcb_ets_cfg etsrec; struct ice_dcb_pfc_cfg pfc; +#define ICE_QOS_MODE_VLAN 0x0 +#define ICE_QOS_MODE_DSCP 0x1 + u8 pfc_mode; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; + /* a bit is set for each DSCP value the user has mapped */ + DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); + /* array holding DSCP 
-> UP/TC values for DSCP L3 QoS mode */ + u8 dscp_map[ICE_DSCP_NUM_VAL]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 0x2 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index a604552fa634..4a69823e6abd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -351,6 +351,7 @@ struct ixgbe_ring { }; u16 rx_offset; struct xdp_rxq_info xdp_rxq; + spinlock_t tx_lock; /* used in XDP mode */ struct xsk_buff_pool *xsk_pool; u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */ u16 rx_buf_len; @@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum { #define IXGBE_MAX_FCOE_INDICES 8 #define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) -#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 #define IXGBE_MAX_MACVLANS 63 +DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); + struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ @@ -629,7 +632,7 @@ struct ixgbe_adapter { /* XDP */ int num_xdp_queues; - struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES]; + struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS]; unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */ /* TX */ @@ -772,6 +775,22 @@ struct ixgbe_adapter { #endif /* CONFIG_IXGBE_IPSEC */ }; +static inline int ixgbe_determine_xdp_q_idx(int cpu) +{ + if (static_key_enabled(&ixgbe_xdp_locking_key)) + return cpu % IXGBE_MAX_XDP_QS; + else + return cpu; +} + +static inline +struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter) +{ + int index = ixgbe_determine_xdp_q_idx(smp_processor_id()); + + return adapter->xdp_ring[index]; +} + static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) { switch (adapter->hw.mac.type) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 0218f6c9b925..86b11164655e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter) { - return adapter->xdp_prog ? nr_cpu_ids : 0; + int queues; + + queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids); + return adapter->xdp_prog ? 
queues : 0; } #define IXGBE_RSS_64Q_MASK 0x3F @@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = xdp_idx; set_ring_xdp(ring); + spin_lock_init(&ring->tx_lock); /* assign ring to adapter */ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); @@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) adapter->q_vector[v_idx] = NULL; __netif_napi_del(&q_vector->napi); + if (static_key_enabled(&ixgbe_xdp_locking_key)) + static_branch_dec(&ixgbe_xdp_locking_key); + /* * after a call to __netif_napi_del() napi may still be used and * ixgbe_get_stats64() might access the rings on this vector, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 13c4782b920a..69e8bd9aa1a8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); +DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key); +EXPORT_SYMBOL(ixgbe_xdp_locking_key); + static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); @@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
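A worked example of the ring-sharing scheme above, assuming IXGBE_MAX_XDP_QS evaluates to 64: with nr_cpu_ids == 96 the static key is enabled, so CPUs 12 and 76 both map to ring 12 and must serialize on that ring's tx_lock, while with 64 or fewer CPUs the key stays disabled, each CPU owns its own ring, and the spin_lock calls are patched out. Editor's sketch:

static void xdp_ring_sharing_demo(void)
{
	/* assumes nr_cpu_ids = 96 > IXGBE_MAX_XDP_QS = 64, key enabled */
	int a = ixgbe_determine_xdp_q_idx(12);	/* 12 */
	int b = ixgbe_determine_xdp_q_idx(76);	/* 76 % 64 == 12 */

	WARN_ON(a != b);	/* same ring -> tx_lock serializes the CPUs */
}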
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); @@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, if (ixgbe_init_rss_key(adapter)) return -ENOMEM; - adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL); + adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL); if (!adapter->af_xdp_zc_qps) return -ENOMEM; @@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, } #endif -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; struct ixgbe_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; u32 len, cmd_type; @@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -EINVAL; } - if (nr_cpu_ids > MAX_XDP_QUEUES) + /* If the number of CPUs is much larger than the maximum number of + * XDP queues, refuse the program load and return -ENOMEM as before. + */ + if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2) return -ENOMEM; + else if (nr_cpu_ids > IXGBE_MAX_XDP_QS) + static_branch_inc(&ixgbe_xdp_locking_key); old_prog = xchg(&adapter->xdp_prog, prog); need_reset = (!!prog != !!old_prog); @@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring) writel(ring->next_to_use, ring->tail); } +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring) +{ + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); +} + static int ixgbe_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { @@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, /* During program transitions it's possible adapter->xdp_prog is assigned * but ring has not been configured yet. In this case simply abort xmit. */ - ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + ring = adapter->xdp_prog ?
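The ixgbe_xdp_setup() hunk above implements a three-way policy at program load time. A compact restatement of it, again assuming 64 for IXGBE_MAX_XDP_QS:

#include <stdio.h>

#define MAX_XDP_QS 64                   /* assumed IXGBE_MAX_XDP_QS */

static const char *xdp_load_policy(unsigned int nr_cpu_ids)
{
        if (nr_cpu_ids > MAX_XDP_QS * 2)
                return "reject with -ENOMEM";
        if (nr_cpu_ids > MAX_XDP_QS)
                return "load and enable the locking key";
        return "load with lock-free per-CPU rings";
}

int main(void)
{
        printf("64 CPUs:  %s\n", xdp_load_policy(64));
        printf("96 CPUs:  %s\n", xdp_load_policy(96));
        printf("192 CPUs: %s\n", xdp_load_policy(192));
        return 0;
}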
ixgbe_determine_xdp_ring(adapter) : NULL; if (unlikely(!ring)) return -ENXIO; if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state))) return -ENXIO; + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; int err; - err = ixgbe_xmit_xdp_ring(adapter, xdpf); + err = ixgbe_xmit_xdp_ring(ring, xdpf); if (err != IXGBE_XDP_TX) break; nxmit++; @@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); + return nxmit; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 2aeec78029bc..a82533f21d36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -12,7 +12,7 @@ #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ IXGBE_TXD_CMD_RS) -int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter, +int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, struct xdp_frame *xdpf); bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, @@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, struct sk_buff *skb); void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring); +void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring); void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask); void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index b1d22e4d5ec9..db2bc58dfcfd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; + struct ixgbe_ring *ring; struct xdp_frame *xdpf; u32 act; @@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, xdpf = xdp_convert_buff_to_frame(xdp); if (unlikely(!xdpf)) goto out_failure; - result = ixgbe_xmit_xdp_ring(adapter, xdpf); + ring = ixgbe_determine_xdp_ring(adapter); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_lock(&ring->tx_lock); + result = ixgbe_xmit_xdp_ring(ring, xdpf); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) + spin_unlock(&ring->tx_lock); if (result == IXGBE_XDP_CONSUMED) goto out_failure; break; @@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, xdp_do_flush_map(); if (xdp_xmit & IXGBE_XDP_TX) { - struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()]; + struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter); - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. 
- */ - wmb(); - writel(ring->next_to_use, ring->tail); + ixgbe_xdp_ring_update_tail_locked(ring); } u64_stats_update_begin(&rx_ring->syncp); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 62f8c5212182..2258e3f19161 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -96,6 +96,9 @@ struct ltq_etop_priv { struct ltq_etop_chan ch[MAX_DMA_CHAN]; int tx_free[MAX_DMA_CHAN >> 1]; + int tx_burst_len; + int rx_burst_len; + spinlock_t lock; }; @@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev) /* enable crc generation */ ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len); for (i = 0; i < MAX_DMA_CHAN; i++) { int irq = LTQ_DMA_CH0_INT + i; @@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - /* dma needs to start on a 16 byte aligned address */ - byte_offset = CPHYSADDR(skb->data) % 16; + /* dma needs to start on an address aligned to the burst length */ + byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4); ch->skb[ch->dma.desc] = skb; netif_trans_update(dev); @@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev) spin_lock_init(&priv->lock); SET_NETDEV_DEV(dev, &pdev->dev); + err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read tx-burst-length property\n"); + return err; + } + + err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len); + if (err < 0) { + dev_err(&pdev->dev, "unable to read rx-burst-length property\n"); + return err; + } + for (i = 0; i < MAX_DMA_CHAN; i++) { if (IS_TX(i)) netif_napi_add(dev, &priv->ch[i].napi, diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index fb78f17d734f..9b7307eba97c 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -14,13 +14,15 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/if_vlan.h> + #include <linux/of_net.h> #include <linux/of_platform.h> #include <xway_dma.h> /* DMA */ -#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_DATA_LEN (SZ_64K - 1) #define XRX200_DMA_RX 0 #define XRX200_DMA_TX 1 @@ -71,6 +73,9 @@ struct xrx200_priv { struct net_device *net_dev; struct device *dev; + int tx_burst_len; + int rx_burst_len; + __iomem void *pmac_reg; }; @@ -106,7 +111,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch) break; desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + (ch->priv->net_dev->mtu + VLAN_ETH_HLEN + + ETH_FCS_LEN); ch->dma.desc++; ch->dma.desc %= LTQ_DESC_NUM; } @@ -154,19 +160,20 @@ static int xrx200_close(struct net_device *net_dev) static int xrx200_alloc_skb(struct xrx200_chan *ch) { + int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; struct sk_buff *skb = ch->skb[ch->dma.desc]; dma_addr_t mapping; int ret = 0; ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, - XRX200_DMA_DATA_LEN); + len); if (!ch->skb[ch->dma.desc]) { ret = -ENOMEM; goto skip; } mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data, - XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE); + len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) { dev_kfree_skb_any(ch->skb[ch->dma.desc]); ch->skb[ch->dma.desc] = skb; @@ -179,8 +186,7 @@ static int xrx200_alloc_skb(struct xrx200_chan
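Both lantiq drivers replace the fixed 16-byte DMA alignment with one derived from the configured burst length; the burst length is counted in 32-bit words, hence the multiply by 4. A standalone sketch of the arithmetic with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t tx_burst_len = 8;      /* from the lantiq,tx-burst-length property */
        uintptr_t mapping = 0x2007;     /* example DMA address of skb->data */
        uintptr_t byte_offset = mapping % (tx_burst_len * 4);

        /* hardware is given the aligned address; the offset rides in ctl */
        printf("addr 0x%lx -> desc 0x%lx, offset %lu\n",
               (unsigned long)mapping,
               (unsigned long)(mapping - byte_offset),
               (unsigned long)byte_offset);
        return 0;
}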
*ch) wmb(); skip: ch->dma.desc_base[ch->dma.desc].ctl = - LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | - XRX200_DMA_DATA_LEN; + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len; return ret; } @@ -316,8 +322,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(priv->dev, mapping))) goto err_drop; - /* dma needs to start on a 16 byte aligned address */ - byte_offset = mapping % 16; + /* dma needs to start on an address aligned to the burst length */ + byte_offset = mapping % (priv->tx_burst_len * 4); desc->addr = mapping - byte_offset; /* Make sure the address is written before we give it to HW */ @@ -340,10 +346,57 @@ err_drop: return NETDEV_TX_OK; } +static int +xrx200_change_mtu(struct net_device *net_dev, int new_mtu) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch_rx = &priv->chan_rx; + int old_mtu = net_dev->mtu; + bool running = false; + struct sk_buff *skb; + int curr_desc; + int ret = 0; + + net_dev->mtu = new_mtu; + + if (new_mtu <= old_mtu) + return ret; + + running = netif_running(net_dev); + if (running) { + napi_disable(&ch_rx->napi); + ltq_dma_close(&ch_rx->dma); + } + + xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM); + curr_desc = ch_rx->dma.desc; + + for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + skb = ch_rx->skb[ch_rx->dma.desc]; + ret = xrx200_alloc_skb(ch_rx); + if (ret) { + net_dev->mtu = old_mtu; + break; + } + dev_kfree_skb_any(skb); + } + + ch_rx->dma.desc = curr_desc; + if (running) { + napi_enable(&ch_rx->napi); + ltq_dma_open(&ch_rx->dma); + ltq_dma_enable_irq(&ch_rx->dma); + } + + return ret; +} + static const struct net_device_ops xrx200_netdev_ops = { .ndo_open = xrx200_open, .ndo_stop = xrx200_close, .ndo_start_xmit = xrx200_start_xmit, + .ndo_change_mtu = xrx200_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -369,7 +422,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv) int ret = 0; int i; - ltq_dma_init_port(DMA_PORT_ETOP); + ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len); ch_rx->dma.nr = XRX200_DMA_RX; ch_rx->dma.dev = priv->dev; @@ -453,7 +506,7 @@ static int xrx200_probe(struct platform_device *pdev) net_dev->netdev_ops = &xrx200_netdev_ops; SET_NETDEV_DEV(net_dev, dev); net_dev->min_mtu = ETH_ZLEN; - net_dev->max_mtu = XRX200_DMA_DATA_LEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN; /* load the memory ranges */ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); @@ -478,6 +531,18 @@ static int xrx200_probe(struct platform_device *pdev) if (err) eth_hw_addr_random(net_dev); + err = device_property_read_u32(dev, "lantiq,tx-burst-length", &priv->tx_burst_len); + if (err < 0) { + dev_err(dev, "unable to read tx-burst-length property\n"); + return err; + } + + err = device_property_read_u32(dev, "lantiq,rx-burst-length", &priv->rx_burst_len); + if (err < 0) { + dev_err(dev, "unable to read rx-burst-length property\n"); + return err; + } + /* bring up the dma engine and IP core */ err = xrx200_dma_init(priv); if (err) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 34a089b71e55..d379a35c4618 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable) if (!cgx) return; - if (is_dev_rpm(cgx)) - return; - if (enable) { /* Enable
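The xrx200 sizing above works in both directions: RX buffers are sized up from the MTU, and max_mtu is derived back from the 64K-1 descriptor data limit. A small check of those formulas, with header constants mirroring if_vlan.h and if_ether.h:

#include <stdio.h>

#define VLAN_ETH_HLEN   18              /* Ethernet header + one VLAN tag */
#define ETH_FCS_LEN     4
#define DMA_DATA_LEN    (65536 - 1)     /* SZ_64K - 1 */

int main(void)
{
        int mtu = 1500;

        printf("rx buffer for MTU %d: %d bytes\n",
               mtu, mtu + VLAN_ETH_HLEN + ETH_FCS_LEN);
        printf("largest supported MTU: %d\n",
               DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN);
        return 0;
}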
inbound PTP timestamping */ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL); @@ -1545,9 +1542,11 @@ static int cgx_lmac_exit(struct cgx *cgx) static void cgx_populate_features(struct cgx *cgx) { if (is_dev_rpm(cgx)) - cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC); + cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | + RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); else - cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); + cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 | + RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF); } static struct mac_ops cgx_mac_ops = { @@ -1571,6 +1570,7 @@ static struct mac_ops cgx_mac_ops = { .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status, .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm, .mac_pause_frm_config = cgx_lmac_pause_frm_config, + .mac_enadis_ptp_config = cgx_lmac_ptp_config, }; static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index d9bea13f15b8..8931864ee110 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -191,6 +191,7 @@ enum nix_scheduler { #define NIX_CHAN_SDP_CH_START (0x700ull) #define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) #define NIX_CHAN_SDP_NUM_CHANS 256 +#define NIX_CHAN_CPT_CH_START (0x800ull) /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index c38306b3384a..fc6e7423cbd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -102,6 +102,11 @@ struct mac_ops { void (*mac_pause_frm_config)(void *cgxd, int lmac_id, bool enable); + + /* Enable/Disable Inbound PTP */ + void (*mac_enadis_ptp_config)(void *cgxd, + int lmac_id, + bool enable); }; struct cgx { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 154877706a0e..dfe487235007 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -84,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0009) +#define OTX2_MBOX_VERSION (0x000a) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \ M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \ M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \ cgx_pause_frm_cfg) \ -M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ -M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ -M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ -M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ -M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ - cgx_set_link_mode_rsp) \ -M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ - cgx_features_info_msg) \ -M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ -M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ - 
cgx_mac_addr_add_rsp) \ -M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ +M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \ +M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \ +M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \ msg_rsp) \ -M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \ cgx_max_dmac_entries_get_rsp) \ -M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ -M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ +M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \ +M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\ + cgx_set_link_mode_rsp) \ +M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \ +M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \ + cgx_features_info_msg) \ +M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \ msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ @@ -186,6 +186,8 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \ M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ cpt_rd_wr_reg_msg) \ +M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \ + cpt_inline_ipsec_cfg_msg, msg_rsp) \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ @@ -229,6 +231,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \ M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \ npc_mcam_read_entry_req, \ npc_mcam_read_entry_rsp) \ +M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \ + npc_set_pkind, msg_rsp) \ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \ msg_req, npc_mcam_read_base_rule_rsp) \ M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \ @@ -270,6 +274,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ +M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \ + nix_inline_ipsec_cfg, msg_rsp) \ +M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \ + nix_inline_ipsec_lf_cfg, msg_rsp) \ M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ @@ -575,10 +583,13 @@ struct cgx_mac_addr_update_req { }; #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ -#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ -#define RVU_MAC_VERSION BIT_ULL(2) -#define RVU_MAC_CGX BIT_ULL(3) -#define RVU_MAC_RPM BIT_ULL(4) +#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1) + /* flow control from physical link higig2 messages */ +#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precison time protocol */ +#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */ +#define RVU_MAC_VERSION BIT_ULL(4) +#define RVU_MAC_CGX BIT_ULL(5) +#define RVU_MAC_RPM BIT_ULL(6) struct cgx_features_info_msg { struct mbox_msghdr hdr; @@ -593,6 +604,22 @@ struct rpm_stats_rsp { u64 
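The M() entries being renumbered here live in an x-macro table: each message is listed once, and the list is re-expanded to generate the ID enum, handler declarations and so on, which is why the table edit plus the OTX2_MBOX_VERSION bump is the whole ABI change. A simplified two-argument sketch of the pattern (the real mbox.h M() takes five arguments):

#include <stdio.h>

#define MBOX_MESSAGES                           \
        M(CGX_START_RXTX,       0x200)          \
        M(CGX_STOP_RXTX,        0x201)          \
        M(CGX_PTP_RX_ENABLE,    0x20C)

enum mbox_msg_id {
#define M(name, id) MBOX_MSG_##name = (id),
        MBOX_MESSAGES
#undef M
};

int main(void)
{
        printf("CGX_PTP_RX_ENABLE is 0x%x\n", MBOX_MSG_CGX_PTP_RX_ENABLE);
        return 0;
}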
tx_stats[RPM_TX_STATS_COUNT]; }; +struct npc_set_pkind { + struct mbox_msghdr hdr; +#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0) +#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63) + u64 mode; +#define PKIND_TX BIT_ULL(0) +#define PKIND_RX BIT_ULL(1) + u8 dir; + u8 pkind; /* valid only in case custom flag */ + u8 var_len_off; /* Offset of custom header length field. + * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND + */ + u8 var_len_off_mask; /* Mask for length with in offset */ + u8 shift_dir; /* shift direction to get length of the header at var_len_off */ +}; + /* NPA mbox message formats */ /* NPA mailbox error codes @@ -698,6 +725,8 @@ enum nix_af_status { NIX_AF_ERR_INVALID_BANDPROF = -426, NIX_AF_ERR_IPOLICER_NOTSUPP = -427, NIX_AF_ERR_BANDPROF_INVAL_REQ = -428, + NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, + NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, }; /* For NIX RX vtag action */ @@ -1065,6 +1094,40 @@ struct nix_bp_cfg_rsp { u8 chan_cnt; /* Number of channel for which bpids are assigned */ }; +/* Global NIX inline IPSec configuration */ +struct nix_inline_ipsec_cfg { + struct mbox_msghdr hdr; + u32 cpt_credit; + struct { + u8 egrp; + u8 opcode; + u16 param1; + u16 param2; + } gen_cfg; + struct { + u16 cpt_pf_func; + u8 cpt_slot; + } inst_qsel; + u8 enable; +}; + +/* Per NIX LF inline IPSec configuration */ +struct nix_inline_ipsec_lf_cfg { + struct mbox_msghdr hdr; + u64 sa_base_addr; + struct { + u32 tag_const; + u16 lenm1_max; + u8 sa_pow2_size; + u8 tt; + } ipsec_cfg0; + struct { + u32 sa_idx_max; + u8 sa_idx_w; + } ipsec_cfg1; + u8 enable; +}; + struct nix_hw_info { struct mbox_msghdr hdr; u16 rsvs16; @@ -1357,12 +1420,15 @@ struct npc_mcam_get_stats_rsp { enum ptp_op { PTP_OP_ADJFINE = 0, PTP_OP_GET_CLOCK = 1, + PTP_OP_GET_TSTMP = 2, + PTP_OP_SET_THRESH = 3, }; struct ptp_req { struct mbox_msghdr hdr; u8 op; s64 scaled_ppm; + u64 thresh; }; struct ptp_rsp { @@ -1399,7 +1465,9 @@ enum cpt_af_status { CPT_AF_ERR_LF_INVALID = -903, CPT_AF_ERR_ACCESS_DENIED = -904, CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905, - CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906 + CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906, + CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907, + CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908 }; /* CPT mbox message formats */ @@ -1420,6 +1488,22 @@ struct cpt_lf_alloc_req_msg { int blkaddr; }; +#define CPT_INLINE_INBOUND 0 +#define CPT_INLINE_OUTBOUND 1 + +/* Mailbox message request format for CPT IPsec + * inline inbound and outbound configuration. + */ +struct cpt_inline_ipsec_cfg_msg { + struct mbox_msghdr hdr; + u8 enable; + u8 slot; + u8 dir; + u8 sso_pf_func_ovrd; + u16 sso_pf_func; /* inbound path SSO_PF_FUNC */ + u16 nix_pf_func; /* outbound path NIX_PF_FUNC */ +}; + /* Mailbox message request and response format for CPT stats. */ struct cpt_sts_req { struct mbox_msghdr hdr; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 3a819b24accc..6e1192f52608 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -31,9 +31,9 @@ enum npc_kpu_la_ltype { NPC_LT_LA_HIGIG2_ETHER, NPC_LT_LA_IH_NIX_HIGIG2_ETHER, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_LT_LA_CH_LEN_90B_ETHER, NPC_LT_LA_CPT_HDR, NPC_LT_LA_CUSTOM_L2_24B_ETHER, + NPC_LT_LA_CUSTOM_PRE_L2_ETHER, NPC_LT_LA_CUSTOM0 = 0xE, NPC_LT_LA_CUSTOM1 = 0xF, }; @@ -148,10 +148,11 @@ enum npc_kpu_lh_ltype { * Software assigns pkind for each incoming port such as CGX * Ethernet interfaces, LBK interfaces, etc. 
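The var_len_off/var_len_off_mask/shift_dir fields of npc_set_pkind describe where a custom pre-L2 header keeps its length field and how to scale it. A toy model of how such a length could be recovered, illustrating the field semantics rather than the exact KPU hardware computation:

#include <stdio.h>
#include <stdint.h>

static unsigned int custom_hdr_len(const uint8_t *pkt, uint8_t var_len_off,
                                   uint8_t mask, uint8_t shift, int shift_right)
{
        unsigned int len = pkt[var_len_off] & mask;

        return shift_right ? len >> shift : len << shift;
}

int main(void)
{
        uint8_t pkt[64] = { [2] = 0x1a };       /* length field at offset 2 */

        /* low 5 bits hold the length in 4-byte words: mask, then shift left */
        printf("header length: %u bytes\n", custom_hdr_len(pkt, 2, 0x1f, 2, 0));
        return 0;
}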
*/ -#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND +#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND enum npc_pkind_type { NPC_RX_LBK_PKIND = 0ULL, + NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL, NPC_RX_VLAN_EXDSA_PKIND = 56ULL, NPC_RX_CHLEN24B_PKIND = 57ULL, NPC_RX_CPT_HDR_PKIND, @@ -162,6 +163,10 @@ enum npc_pkind_type { NPC_TX_DEF_PKIND, /* NIX-TX PKIND */ }; +enum npc_interface_type { + NPC_INTF_MODE_DEF, +}; + /* list of known and supported fields in packet header and * fields present in key structure. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index 588822a0cf21..1a8c5376297c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -176,9 +176,8 @@ enum npc_kpu_parser_state { NPC_S_KPU1_EXDSA, NPC_S_KPU1_HIGIG2, NPC_S_KPU1_IH_NIX_HIGIG2, - NPC_S_KPU1_CUSTOM_L2_90B, + NPC_S_KPU1_CUSTOM_PRE_L2, NPC_S_KPU1_CPT_HDR, - NPC_S_KPU1_CUSTOM_L2_24B, NPC_S_KPU1_VLAN_EXDSA, NPC_S_KPU2_CTAG, NPC_S_KPU2_CTAG2, @@ -188,6 +187,8 @@ enum npc_kpu_parser_state { NPC_S_KPU2_PREHEADER, NPC_S_KPU2_EXDSA, NPC_S_KPU2_NGIO, + NPC_S_KPU2_CPT_CTAG, + NPC_S_KPU2_CPT_QINQ, NPC_S_KPU3_CTAG, NPC_S_KPU3_STAG, NPC_S_KPU3_QINQ, @@ -979,8 +980,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 16, 20, 0, 0, - NPC_S_KPU1_ETHER, 0, 0, - NPC_LID_LA, NPC_LT_NA, + NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER, 0, 0, 0, 0, 0, @@ -996,27 +997,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 36, 40, 44, 0, 0, - NPC_S_KPU1_CUSTOM_L2_24B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 40, 54, 58, 0, 0, - NPC_S_KPU1_CPT_HDR, 0, 0, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CPT_HDR, 40, 0, NPC_LID_LA, NPC_LT_NA, 0, - 0, 0, 0, 0, + 7, 7, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 102, 106, 110, 0, 0, - NPC_S_KPU1_CUSTOM_L2_90B, 0, 0, - NPC_LID_LA, NPC_LT_NA, + 12, 16, 20, 0, 0, + NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1, + NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, 0, 0, 0, 0, 0, @@ -1711,7 +1712,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP, 0xffff, 0x0000, @@ -1720,7 +1721,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_IP6, 0xffff, 0x0000, @@ -1729,7 +1730,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ARP, 0xffff, 0x0000, @@ -1738,7 +1739,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_RARP, 0xffff, 0x0000, @@ -1747,7 +1748,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_PTP, 0xffff, 0x0000, @@ -1756,7 +1757,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_FCOE, 0xffff, 0x0000, @@ -1765,7 +1766,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - 
NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, NPC_ETYPE_CTAG, @@ -1774,7 +1775,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_CTAG, 0xffff, 0x0000, @@ -1783,7 +1784,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_SBTAG, 0xffff, 0x0000, @@ -1792,7 +1793,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -1801,7 +1802,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_ETAG, 0xffff, 0x0000, @@ -1810,7 +1811,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSU, 0xffff, 0x0000, @@ -1819,7 +1820,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_MPLSM, 0xffff, 0x0000, @@ -1828,7 +1829,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, NPC_ETYPE_NSH, 0xffff, 0x0000, @@ -1837,7 +1838,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_90B, 0xff, + NPC_S_KPU1_CUSTOM_PRE_L2, 0xff, 0x0000, 0x0000, 0x0000, @@ -1847,168 +1848,33 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, NPC_ETYPE_IP, 0xffff, 0x0000, 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_CTAG, - 0xffff, 0x0000, 0x0000, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - NPC_ETYPE_QINQ, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, - NPC_ETYPE_IP, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, - 0x0000, NPC_ETYPE_IP6, 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, 0x0000, - 0xffff, 0x0000, 0x0000, - NPC_ETYPE_CTAG, - 0xffff, - }, - { - NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0xffff, - 0x0000, 0x0000, - NPC_ETYPE_QINQ, - 0xffff, }, { NPC_S_KPU1_CPT_HDR, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_IP6, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_RARP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_PTP, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_FCOE, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, NPC_ETYPE_CTAG, 0xffff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_CTAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 
NPC_ETYPE_SBTAG, - 0xffff, 0x0000, 0x0000, 0x0000, 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, + NPC_S_KPU1_CPT_HDR, 0xff, NPC_ETYPE_QINQ, 0xffff, 0x0000, @@ -2017,51 +1883,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = { 0x0000, }, { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_ETAG, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSU, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_MPLSM, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - NPC_ETYPE_NSH, - 0xffff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU1_CUSTOM_L2_24B, 0xff, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - 0x0000, - }, - { NPC_S_KPU1_VLAN_EXDSA, 0xff, NPC_ETYPE_CTAG, 0xffff, @@ -3066,6 +2887,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = { 0x0000, }, { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_CTAG, 0xff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP, + 0xffff, + 0x0000, + 0x0000, + }, + { + NPC_S_KPU2_CPT_QINQ, 0xff, + NPC_ETYPE_CTAG, + 0xffff, + NPC_ETYPE_IP6, + 0xffff, + 0x0000, + 0x0000, + }, + { NPC_S_NA, 0X00, 0x0000, 0x0000, @@ -7496,15 +7353,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = { NPC_S_KPU9_GTPU, 0xff, 0x0000, 0x0000, - NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU, - NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK, - 0x0000, - 0x0000, - }, - { - NPC_S_KPU9_GTPU, 0xff, - 0x0000, - 0x0000, NPC_GTP_PT_GTP | NPC_GTP_VER1, NPC_GTP_PT_MASK | NPC_GTP_VER_MASK, 0x0000, @@ -9192,159 +9040,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_IP6, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_ARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_RARP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_PTP, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, + NPC_S_KPU5_FCOE, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG2, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_CTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 102, 1, - NPC_LID_LA, 
NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_SBTAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, + NPC_S_KPU2_QINQ, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 102, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, + NPC_S_KPU2_ETAG, 12, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_MPLS, + NPC_S_KPU4_MPLS, 14, 0, + NPC_LID_LA, NPC_LT_NA, + 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 104, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + NPC_S_KPU4_NSH, 14, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 56, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, + 0, 0, 0, 0, 1, + NPC_S_NA, 0, 0, + NPC_LID_LA, NPC_LT_NA, 0, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 54, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 8, 0, 6, 3, 0, - NPC_S_KPU5_CPT_IP, 60, 1, + NPC_S_KPU5_CPT_IP, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9352,7 +9168,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 6, 0, 0, 3, 0, - NPC_S_KPU5_CPT_IP6, 60, 1, + NPC_S_KPU5_CPT_IP6, 14, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, 0, 0, 0, 0, 0, @@ -9360,7 +9176,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 58, 1, + NPC_S_KPU2_CPT_CTAG, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, @@ -9368,141 +9184,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = { { NPC_ERRLEV_RE, NPC_EC_NOERR, 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 58, 1, + NPC_S_KPU2_CPT_QINQ, 12, 1, NPC_LID_LA, NPC_LT_LA_CPT_HDR, NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, 0, 0, 0, 0, }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CPT_HDR, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 3, 0, - NPC_S_KPU5_IP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 6, 0, 0, 3, 0, - NPC_S_KPU5_IP6, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_ARP, 38, 1, - NPC_LID_LA, 
NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_RARP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_PTP, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 3, 0, - NPC_S_KPU5_FCOE, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - 0, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 0, 0, 0, - NPC_S_KPU2_CTAG2, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_CTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 22, 0, 0, - NPC_S_KPU2_SBTAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 4, 8, 0, 0, 0, - NPC_S_KPU2_QINQ, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 12, 26, 0, 0, - NPC_S_KPU2_ETAG, 36, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 6, 10, 2, 0, - NPC_S_KPU4_MPLS, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_MPLS, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 2, 0, 0, 2, 0, - NPC_S_KPU4_NSH, 38, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_WITH_NSH, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 0, 0, 0, 0, 1, - NPC_S_NA, 0, 1, - NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER, - NPC_F_LA_L_UNK_ETYPE, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, 12, 0, 0, 1, 0, NPC_S_KPU3_VLAN_EXDSA, 12, 1, NPC_LID_LA, NPC_LT_LA_ETHER, @@ -10395,6 +10083,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = { 0, 0, 0, 0, }, { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 6, 1, + NPC_LID_LB, NPC_LT_LB_CTAG, + 0, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 8, 0, 6, 2, 0, + NPC_S_KPU5_CPT_IP, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { + NPC_ERRLEV_RE, NPC_EC_NOERR, + 6, 0, 0, 2, 0, + NPC_S_KPU5_CPT_IP6, 10, 1, + NPC_LID_LB, NPC_LT_LB_STAG_QINQ, + NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG, + 0, 0, 0, 0, + }, + { NPC_ERRLEV_LB, NPC_EC_L2_K3, 0, 0, 0, 0, 1, NPC_S_NA, 0, 0, @@ -14335,16 +14055,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = { }, { NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU12_TU_IP, 8, 1, - NPC_LID_LE, NPC_LT_LE_GTPU, - NPC_F_LE_L_GTPU_G_PDU, - 0, 0, 0, 0, - }, - { - NPC_ERRLEV_RE, NPC_EC_NOERR, - 8, 0, 6, 2, 0, - NPC_S_KPU12_TU_IP, 8, 1, + 8, 0, 6, 2, 1, + NPC_S_NA, 0, 1, NPC_LID_LE, NPC_LT_LE_GTPU, 0, 0, 0, 0, 0, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 9b8e59f4c206..d6321de3cc17 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -27,54 +27,29 @@ #define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 -#define PCI_RST_BAR_NO 0 #define PTP_CLOCK_CFG 0xF00ULL #define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0) +#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1) +#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2) +#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9) +#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8) +#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10) +#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30) +#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31) + +#define PTP_PPS_HI_INCR 0xF60ULL +#define PTP_PPS_LO_INCR 0xF68ULL +#define PTP_PPS_THRESH_HI 0xF58ULL + #define PTP_CLOCK_LO 0xF08ULL #define PTP_CLOCK_HI 0xF10ULL #define PTP_CLOCK_COMP 0xF18ULL - -#define RST_BOOT 0x1600ULL -#define RST_MUL_BITS GENMASK_ULL(38, 33) -#define CLOCK_BASE_RATE 50000000ULL +#define PTP_TIMESTAMP 0xF20ULL static struct ptp *first_ptp_block; static const struct pci_device_id ptp_id_table[]; -static u64 get_clock_rate(void) -{ - u64 cfg, ret = CLOCK_BASE_RATE * 16; - struct pci_dev *pdev; - void __iomem *base; - - /* To get the input clock frequency with which PTP co-processor - * block is running the base frequency(50 MHz) needs to be multiplied - * with multiplier bits present in RST_BOOT register of RESET block. - * Hence below code gets the multiplier bits from the RESET PCI - * device present in the system. - */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_RST, NULL); - if (!pdev) - goto error; - - base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO); - if (!base) - goto error_put_pdev; - - cfg = readq(base + RST_BOOT); - ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg); - - iounmap(base); - -error_put_pdev: - pci_dev_put(pdev); - -error: - return ret; -} - struct ptp *ptp_get(void) { struct ptp *ptp = first_ptp_block; @@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk) return 0; } +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts) +{ + struct pci_dev *pdev; + u64 clock_comp; + u64 clock_cfg; + + if (!ptp) + return; + + pdev = ptp->pdev; + + if (!sclk) { + dev_err(&pdev->dev, "PTP input clock cannot be zero\n"); + return; + } + + /* sclk is in MHz */ + ptp->clock_rate = sclk * 1000000; + + /* Enable PTP clock */ + clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); + + if (ext_clk_freq) { + ptp->clock_rate = ext_clk_freq; + /* Set GPIO as PTP clock source */ + clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN; + } + + if (extts) { + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE; + /* Set GPIO as timestamping source */ + clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK; + clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN; + } + + clock_cfg |= PTP_CLOCK_CFG_PTP_EN; + clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV; + writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); + + /* Set 50% duty cycle for 1Hz output */ + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR); + writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR); + + clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; + /* Initial compensation value to start the nanosecs counter */ + writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); +} + +static int ptp_get_tstmp(struct ptp *ptp, u64 *clk) +{ + *clk = readq(ptp->reg_base + PTP_TIMESTAMP); + + return 0; +} + +static int ptp_set_thresh(struct ptp *ptp, u64 thresh) +{ + writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI); + + return 0; +} + static int 
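ptp_start() above programs two fixed-point quantities: the compensation register holds nanoseconds per input-clock tick in 32.32 format, and the PPS increment constants are 500000000 ns (half a second) shifted left by 32, which gives the 50% duty cycle the comment mentions. A quick standalone check of both, using an illustrative SCLK:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t sclk_mhz = 1000;               /* example fwdata sclk */
        uint64_t clock_rate = sclk_mhz * 1000000;
        uint64_t clock_comp = (1000000000ULL << 32) / clock_rate;

        printf("comp = 0x%llx (%.3f ns per tick)\n",
               (unsigned long long)clock_comp,
               (double)clock_comp / (double)(1ULL << 32));
        printf("pps half-period = %llu ns\n",
               (unsigned long long)(0x1dcd650000000000ULL >> 32));
        return 0;
}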
ptp_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct ptp *ptp; - u64 clock_comp; - u64 clock_cfg; int err; ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL); @@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev, ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO]; - ptp->clock_rate = get_clock_rate(); - - /* Enable PTP clock */ - clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG); - clock_cfg |= PTP_CLOCK_CFG_PTP_EN; - writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG); - - clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate; - /* Initial compensation value to start the nanosecs counter */ - writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); - pci_set_drvdata(pdev, ptp); if (!first_ptp_block) first_ptp_block = ptp; @@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req, case PTP_OP_GET_CLOCK: err = ptp_get_clock(rvu->ptp, &rsp->clk); break; + case PTP_OP_GET_TSTMP: + err = ptp_get_tstmp(rvu->ptp, &rsp->clk); + break; + case PTP_OP_SET_THRESH: + err = ptp_set_thresh(rvu->ptp, req->thresh); + break; default: err = -EINVAL; break; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 76d404b24552..1b81a0493cd3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -20,6 +20,7 @@ struct ptp { struct ptp *ptp_get(void); void ptp_put(struct ptp *ptp); +void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts); extern struct pci_driver ptp_driver; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 07b0eafccad8..e695fa0e82a9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = { .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status, .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm, .mac_pause_frm_config = rpm_lmac_pause_frm_config, + .mac_enadis_ptp_config = rpm_lmac_ptp_config, }; struct mac_ops *rpm_get_mac_ops(void) @@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable) return 0; } + +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable) +{ + rpm_t *rpm = rpmd; + u64 cfg; + + if (!is_lmac_valid(rpm, lmac_id)) + return; + + cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG); + if (enable) + cfg |= RPMX_RX_TS_PREPEND; + else + cfg &= ~RPMX_RX_TS_PREPEND; + rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index f0b069442dcc..57c8a687b488 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -14,6 +14,8 @@ #define PCI_DEVID_CN10K_RPM 0xA060 /* Registers */ +#define RPMX_CMRX_CFG 0x00 +#define RPMX_RX_TS_PREPEND BIT_ULL(22) #define RPMX_CMRX_SW_INT 0x180 #define RPMX_CMRX_SW_INT_W1S 0x188 #define RPMX_CMRX_SW_INT_ENA_W1S 0x198 @@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause); int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat); int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat); +void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable); #endif /* RPM_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 
35836903b7fb..4cb24e91e648 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1287,6 +1287,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, return (val & 0xFFF); } +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int numlfs, total_lfs = 0, nr_blocks = 0; + int i, num_blkaddr[BLK_COUNT] = { 0 }; + struct rvu_block *block; + int blkaddr; + u16 start_slot; + + if (!is_blktype_attached(pfvf, blktype)) + return -ENODEV; + + /* Get all the block addresses from which LFs are attached to + * the given pcifunc in num_blkaddr[]. + */ + for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) { + block = &rvu->hw->block[blkaddr]; + if (block->type != blktype) + continue; + if (!is_block_implemented(rvu->hw, blkaddr)) + continue; + + numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr); + if (numlfs) { + total_lfs += numlfs; + num_blkaddr[nr_blocks] = blkaddr; + nr_blocks++; + } + } + + if (global_slot >= total_lfs) + return -ENODEV; + + /* Based on the given global slot number retrieve the + * correct block address out of all attached block + * addresses and slot number in that block. + */ + total_lfs = 0; + blkaddr = -ENODEV; + for (i = 0; i < nr_blocks; i++) { + numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]); + total_lfs += numlfs; + if (global_slot < total_lfs) { + blkaddr = num_blkaddr[i]; + start_slot = total_lfs - numlfs; + *slot_in_block = global_slot - start_slot; + break; + } + } + + return blkaddr; +} + static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -3186,6 +3240,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&rvu->rswitch.switch_lock); + if (rvu->fwdata) + ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate, + rvu->fwdata->ptp_ext_tstamp); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 1d9411232f1d..58b166698fa5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -220,6 +220,7 @@ struct rvu_pfvf { u16 maxlen; u16 minlen; + bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */ u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */ @@ -237,6 +238,7 @@ struct rvu_pfvf { bool cgx_in_use; /* this PF/VF using CGX? 
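rvu_get_blkaddr_from_slot() above resolves a slot index that is global across all attached blocks of one type (say, CPT0 and CPT1) into a block address plus a slot local to that block, by walking the per-block LF counts. The arithmetic in miniature, with toy counts in place of RVU state:

#include <stdio.h>

int main(void)
{
        int lfs_per_blk[] = { 3, 2 };   /* e.g. 3 LFs on CPT0, 2 on CPT1 */
        int global_slot = 4;            /* fifth LF attached to the PF/VF */
        int total = 0;

        for (int i = 0; i < 2; i++) {
                total += lfs_per_blk[i];
                if (global_slot < total) {
                        printf("block %d, local slot %d\n",
                               i, global_slot - (total - lfs_per_blk[i]));
                        break;
                }
        }
        return 0;
}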
*/ int cgx_users; /* number of cgx users - used only by PFs */ + int intf_mode; u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ @@ -394,7 +396,9 @@ struct rvu_fwdata { u64 mcam_addr; u64 mcam_sz; u64 msixtr_base; -#define FWDATA_RESERVED_MEM 1023 + u32 ptp_ext_clk_rate; + u32 ptp_ext_tstamp; +#define FWDATA_RESERVED_MEM 1022 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 5 #define CGX_LMACS_MAX 4 @@ -656,6 +660,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, + u16 global_slot, u16 *slot_in_block); /* RVU HW reg validation */ enum regmap_block { @@ -794,6 +800,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 src, struct mcam_entry *entry, u8 *intf, u8 *ena); +bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc); bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); @@ -827,4 +834,7 @@ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, u8 var_len_off_mask, + u8 shift_dir); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 81e8ea9ee30e..c3842e454657 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -411,7 +411,7 @@ int rvu_cgx_exit(struct rvu *rvu) * VF's of mapped PF and other PFs are not allowed. This fn() checks * whether a PFFUNC is permitted to do the config or not. 
*/ -static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) +inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc) { if ((pcifunc & RVU_PFVF_FUNC_MASK) || !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) @@ -694,7 +694,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; u8 cgx_id, lmac_id; void *cgxd; @@ -711,13 +713,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgxd = rvu_cgx_pdata(cgx_id, rvu); - cgx_lmac_ptp_config(cgxd, lmac_id, enable); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable); /* If PTP is enabled then inform NPC that packets to be * parsed by this PF will have their data shifted by 8 bytes * and if PTP is disabled then no shift is required */ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable)) return -EINVAL; + /* This flag is required to clean up CGX conf if app gets killed */ + pfvf->hw_rx_tstamp_en = enable; return 0; } @@ -725,6 +730,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable) int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { + if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc))) + return -EPERM; + return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 46a41cfff575..7dbbc115cde4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu) /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set */ - if (cpt_chan_base <= 0x800) { - hw->cpt_chan_base = 0x800; + if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) { + hw->cpt_chan_base = NIX_CHAN_CPT_CH_START; } else { dev_err(rvu->dev, "CPT channels could not fit in the range 2048-4095\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 1f90a7403392..267d092b8e97 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -197,6 +197,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req, return ret; } +static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 sso_pf_func = req->sso_pf_func; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(16))) { + /* IPSec inline outbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_INB_ENA; + } + /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */ + if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO)) + return CPT_AF_ERR_SSO_PF_FUNC_INVALID; + + nix_sel = (blkaddr == BLKADDR_CPT1) ?
1 : 0; + /* Enable CPT LF for IPsec inline inbound operations */ + if (req->enable) + val |= BIT_ULL(9); + else + val &= ~BIT_ULL(9); + + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (sso_pf_func) { + /* Set SSO_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)sso_pf_func << 32; + val |= (u64)req->nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + } + if (req->sso_pf_func_ovrd) + /* Set SSO_PF_FUNC_OVRD for inline IPSec */ + rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1); + + /* Configure the X2P Link register with the cpt base channel number and + * range of channels it should propagate to X2P + */ + if (!is_rvu_otx2(rvu)) { + val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16); + val |= rvu->hw->cpt_chan_base; + + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val); + rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val); + } + + return 0; +} + +static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf, + struct cpt_inline_ipsec_cfg_msg *req) +{ + u16 nix_pf_func = req->nix_pf_func; + int nix_blkaddr; + u8 nix_sel; + u64 val; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + if (req->enable && (val & BIT_ULL(9))) { + /* IPSec inline inbound path is already enabled for a given + * CPT LF, HRM states that inline inbound & outbound paths + * must not be enabled at the same time for a given CPT LF + */ + return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA; + } + + /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */ + if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX)) + return CPT_AF_ERR_NIX_PF_FUNC_INVALID; + + /* Enable CPT LF for IPsec inline outbound operations */ + if (req->enable) + val |= BIT_ULL(16); + else + val &= ~BIT_ULL(16); + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + + if (nix_pf_func) { + /* Set NIX_PF_FUNC */ + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf)); + val |= (u64)nix_pf_func << 48; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val); + + nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func); + nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 
0 : 1; + + val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf)); + val |= (u64)nix_sel << 8; + rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val); + } + + return 0; +} + +int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu, + struct cpt_inline_ipsec_cfg_msg *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + int cptlf, blkaddr, ret; + u16 actual_slot; + + blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc, + req->slot, &actual_slot); + if (blkaddr < 0) + return CPT_AF_ERR_LF_INVALID; + + block = &rvu->hw->block[blkaddr]; + + cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot); + if (cptlf < 0) + return CPT_AF_ERR_LF_INVALID; + + switch (req->dir) { + case CPT_INLINE_INBOUND: + ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req); + break; + + case CPT_INLINE_OUTBOUND: + ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req); + break; + + default: + return CPT_AF_ERR_PARAM; + } + + return ret; +} + static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req) { u64 offset = req->reg_offset; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 274d3abe30eb..70bacd38a6d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(rvu->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; @@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu) goto err_dl_health; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl_health: rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); return err; } @@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu) struct rvu_devlink *rvu_dl = rvu->rvu_dl; struct devlink *dl = rvu_dl->dl; - if (!dl) - return; - + devlink_unregister(dl); devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params)); rvu_health_reporters_destroy(rvu); - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 9ef4e942e31e..67feb26792e4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, u32 leaf_prof); +static const char *nix_get_ctx_name(int ctype); enum mc_tbl_sz { MC_TBL_SZ_256, @@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw, return 0; } +static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw, + struct nix_aq_enq_req *req, u8 ctype) +{ + struct nix_cn10k_aq_enq_req aq_req; + struct nix_cn10k_aq_enq_rsp aq_rsp; + int rc, word; + + if (req->ctype != NIX_AQ_CTYPE_CQ) + return 0; + + rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, + req->hdr.pcifunc, ctype, req->qidx); + if (rc) { + dev_err(rvu->dev, + "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n", + __func__, nix_get_ctx_name(ctype), req->qidx, + req->hdr.pcifunc); + return rc; + } + + /* Make copy of original 
context & mask which are required + * for resubmission + */ + memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s)); + memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s)); + + /* exclude fields which HW can update */ + aq_req.cq_mask.cq_err = 0; + aq_req.cq_mask.wrptr = 0; + aq_req.cq_mask.tail = 0; + aq_req.cq_mask.head = 0; + aq_req.cq_mask.avg_level = 0; + aq_req.cq_mask.update_time = 0; + aq_req.cq_mask.substream = 0; + + /* Context mask (cq_mask) holds mask value of fields which + * are changed in AQ WRITE operation. + * for example cq.drop = 0xa; + * cq_mask.drop = 0xff; + * Below logic performs '&' between cq and cq_mask so that non + * updated fields are masked out for request and response + * comparison + */ + for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64); + word++) { + *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + *(u64 *)((u8 *)&aq_req.cq + word * 8) &= + (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8)); + } + + if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s))) + return NIX_AF_ERR_AQ_CTX_RETRY_WRITE; + + return 0; +} + static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { struct nix_hw *nix_hw; + int err, retries = 5; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); @@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); +retry: + err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); + + /* HW errata 'AQ Modification to CQ could be discarded on heavy traffic' + * As a workaround, perform CQ context read after each AQ write. If AQ + * read shows AQ write is not updated, perform AQ write again. 
+ */ + if (!err && req->op == NIX_AQ_INSTOP_WRITE) { + err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ); + if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) { + if (retries--) + goto retry; + else + return NIX_AF_ERR_CQ_CTX_WRITE_ERR; + } + } + + return err; } static const char *nix_get_ctx_name(int ctype) @@ -4440,6 +4516,10 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct hwctx_disable_req ctx_req; + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; int err; ctx_req.hdr.pcifunc = pcifunc; @@ -4476,6 +4556,22 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) dev_err(rvu->dev, "CQ ctx disable failed\n"); } + /* reset HW config done for Switch headers */ + rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT, + (PKIND_TX | PKIND_RX), 0, 0, 0, 0); + + /* Disabling CGX and NPC config done for PTP */ + if (pfvf->hw_rx_tstamp_en) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false); + /* Undo NPC config done for PTP */ + if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false)) + dev_err(rvu->dev, "NPC config for PTP failed\n"); + pfvf->hw_rx_tstamp_en = false; + } + nix_ctx_free(rvu, pfvf); nix_free_all_bandprof(rvu, pcifunc); @@ -4579,6 +4675,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, return 0; } +#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48) +#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32) +#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16) +#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0) + +#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24) +#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8) +#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0) + +static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req, + int blkaddr) +{ + u8 cpt_idx, cpt_blkaddr; + u64 val; + + cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1; + if (req->enable) { + val = 0; + /* Enable context prefetching */ + if (!is_rvu_otx2(rvu)) + val |= BIT_ULL(51); + + /* Set OPCODE and EGRP */ + val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp); + val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1); + val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2); + + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val); + + /* Set CPT queue for inline IPSec */ + val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot); + val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC, + req->inst_qsel.cpt_pf_func); + + if (!is_rvu_otx2(rvu)) { + cpt_blkaddr = (cpt_idx == 0) ? 
BLKADDR_CPT0 : + BLKADDR_CPT1; + val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr); + } + + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + val); + + /* Set CPT credit */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + req->cpt_credit); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx), + 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), + 0x3FFFFF); + } +} + +int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu, + struct nix_inline_ipsec_cfg *req, + struct msg_rsp *rsp) +{ + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0); + if (is_block_implemented(rvu->hw, BLKADDR_CPT1)) + nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1); + + return 0; +} + +int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu, + struct nix_inline_ipsec_lf_cfg *req, + struct msg_rsp *rsp) +{ + int lf, blkaddr, err; + u64 val; + + if (!is_block_implemented(rvu->hw, BLKADDR_CPT0)) + return 0; + + err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr); + if (err) + return err; + + if (req->enable) { + /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */ + val = (u64)req->ipsec_cfg0.tt << 44 | + (u64)req->ipsec_cfg0.tag_const << 20 | + (u64)req->ipsec_cfg0.sa_pow2_size << 16 | + req->ipsec_cfg0.lenm1_max; + + if (blkaddr == BLKADDR_NIX1) + val |= BIT_ULL(46); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val); + + /* Set SA_IDX_W and SA_IDX_MAX */ + val = (u64)req->ipsec_cfg1.sa_idx_w << 32 | + req->ipsec_cfg1.sa_idx_max; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val); + + /* Set SA base address */ + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + req->sa_base_addr); + } else { + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0); + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf), + 0x0); + } + + return 0; +} void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc) { bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 5efb4174e82d..bb6b42bbefa4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, return 0; } +static int +npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind, + u8 var_len_off, u8 var_len_off_mask, u8 shift_dir) +{ + struct npc_kpu_action0 *act0; + u8 shift_count = 0; + int blkaddr; + u64 val; + + if (!var_len_off_mask) + return -EINVAL; + + if (var_len_off_mask != 0xff) { + if (shift_dir) + shift_count = __ffs(var_len_off_mask); + else + shift_count = (8 - __fls(var_len_off_mask)); + } + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc); + if (blkaddr < 0) { + dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); + return -EINVAL; + } + val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind)); + act0 = (struct npc_kpu_action0 *)&val; + act0->var_len_shift = shift_count; + act0->var_len_right = shift_dir; + act0->var_len_mask = var_len_off_mask; + act0->var_len_offset = var_len_off; + rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val); + return 0; +} + +int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, + u64 pkind, u8 var_len_off, 
u8 var_len_off_mask, + u8 shift_dir) + +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr, nixlf, rc, intf_mode; + int pf = rvu_get_pf(pcifunc); + u64 rxpkind, txpkind; + u8 cgx_id, lmac_id; + + /* use default pkind to disable edsa/higig */ + rxpkind = rvu_npc_get_pkind(rvu, pf); + txpkind = NPC_TX_DEF_PKIND; + intf_mode = NPC_INTF_MODE_DEF; + + if (mode & OTX2_PRIV_FLAGS_CUSTOM) { + if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) { + rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind, + var_len_off, + var_len_off_mask, + shift_dir); + if (rc) + return rc; + } + rxpkind = pkind; + txpkind = pkind; + } + + if (dir & PKIND_RX) { + /* rx pkind set req valid only for cgx mapped PFs */ + if (!is_cgx_config_permitted(rvu, pcifunc)) + return 0; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + + rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, + rxpkind); + if (rc) + return rc; + } + + if (dir & PKIND_TX) { + /* Tx pkind set request valid if PCIFUNC has NIXLF attached */ + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), + txpkind); + } + + pfvf->intf_mode = intf_mode; + return 0; +} + +int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req, + struct msg_rsp *rsp) +{ + return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode, + req->dir, req->pkind, req->var_len_off, + req->var_len_off_mask, req->shift_dir); +} + int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu, struct msg_req *req, struct npc_mcam_read_base_rule_rsp *rsp) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 21f1ed4e222f..dbaeb10de7c2 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -236,6 +236,8 @@ #define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8) #define NIX_AF_RX_IPSEC_GEN_CFG (0x0300) #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) +#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) +#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index b92c267628b8..aaf9accc40ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -9,6 +9,6 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ otx2_devlink.o -rvu_nicvf-y := otx2_vf.o otx2_devlink.o +rvu_nicvf-y := otx2_vf.o otx2_devlink.o otx2_ptp.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 78df173e6df2..0aa88cea1676 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -718,7 +718,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) int timeout = 1000; ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { incr = (u64)qidx << 32; while (timeout) { val = otx2_atomic64_add(incr, ptr); @@ -835,17 +835,19 @@ 
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (err) return err; - err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, - TSO_HEADER_SIZE); - if (err) - return err; + if (qidx < pfvf->hw.tx_queues) { + err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, + TSO_HEADER_SIZE); + if (err) + return err; + } sq->sqe_base = sq->sqe->base; sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); if (!sq->sg) return -ENOMEM; - if (pfvf->ptp) { + if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); if (err) @@ -871,20 +873,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; + int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; - int err, pool_id; cq = &qset->cq[qidx]; cq->cq_idx = qidx; + non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues; if (qidx < pfvf->hw.rx_queues) { cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - } else { + if (pfvf->xdp_prog) + xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; cq->cqe_cnt = qset->sqe_cnt; + } else { + cq->cq_type = CQ_XDP; + cq->cint_idx = qidx - non_xdp_queues; + cq->cqe_cnt = qset->sqe_cnt; } cq->cqe_size = pfvf->qset.xqe_size; @@ -991,7 +1000,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) } /* Initialize TX queues */ - for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) { u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); err = otx2_sq_init(pfvf, qidx, sqb_aura); @@ -1006,6 +1015,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf) return err; } + pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf, + NIX_LF_CQ_OP_STATUS); + /* Initialize work queue for receive buffer refill */ pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, sizeof(struct refill_work), GFP_KERNEL); @@ -1035,7 +1047,7 @@ int otx2_config_nix(struct otx2_nic *pfvf) /* Set RQ/SQ/CQ counts */ nixlf->rq_cnt = pfvf->hw.rx_queues; - nixlf->sq_cnt = pfvf->hw.tx_queues; + nixlf->sq_cnt = pfvf->hw.tot_tx_queues; nixlf->cq_cnt = pfvf->qset.cq_cnt; nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; nixlf->rss_grps = MAX_RSS_GROUPS; @@ -1073,7 +1085,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf) int sqb, qidx; u64 iova, pa; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; if (!sq->sqb_ptrs) continue; @@ -1285,7 +1297,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); /* Initialize aura context */ err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); @@ -1305,7 +1317,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) goto fail; /* Allocate pointers and free them to aura/pool */ - for (qidx = 0; qidx < hw->tx_queues; qidx++) { + for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) { pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); pool = &pfvf->qset.pool[pool_id]; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 
a51ecd771d07..a05f6bdac01e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -171,6 +171,8 @@ struct otx2_hw { struct otx2_rss_info rss_info; u16 rx_queues; u16 tx_queues; + u16 xdp_queues; + u16 tot_tx_queues; u16 max_queues; u16 pool_cnt; u16 rqpool_cnt; @@ -223,6 +225,7 @@ struct otx2_hw { #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 +#define CN10K_RPM 3 unsigned long cap_flag; #define LMT_LINE_SIZE 128 @@ -263,6 +266,12 @@ struct otx2_ptp { struct cyclecounter cycle_counter; struct timecounter time_counter; + + struct delayed_work extts_work; + u64 last_extts; + u64 thresh; + + struct ptp_pin_desc extts_config; }; #define OTX2_HW_TIMESTAMP_LEN 8 @@ -336,7 +345,9 @@ struct otx2_nic { #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) #define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; + u64 *cq_op_addr; + struct bpf_prog *xdp_prog; struct otx2_qset qset; struct otx2_hw hw; struct pci_dev *pdev; @@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf) if (!is_dev_otx2(pfvf->pdev)) { __set_bit(CN10K_MBOX, &hw->cap_flag); __set_bit(CN10K_LMTST, &hw->cap_flag); + __set_bit(CN10K_RPM, &hw->cap_flag); } } @@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev); int otx2_stop(struct net_device *netdev); int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd); +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr); + /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); int otx2vf_mcam_flow_init(struct otx2_nic *pfvf); @@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 7ac3ef2fa06a..777a27047c8e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf) return -ENOMEM; } - err = devlink_register(dl); - if (err) { - dev_err(pfvf->dev, "devlink register failed with error %d\n", err); - devlink_free(dl); - return err; - } - otx2_dl = devlink_priv(dl); otx2_dl->dl = dl; otx2_dl->pfvf = pfvf; @@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf) goto err_dl; } - devlink_params_publish(dl); - + devlink_register(dl); return 0; err_dl: - devlink_unregister(dl); devlink_free(dl); return err; } @@ -141,16 +132,10 @@ err_dl: void otx2_unregister_dl(struct otx2_nic *pfvf) { struct otx2_devlink *otx2_dl = pfvf->dl; - struct devlink *dl; - - if (!otx2_dl || !otx2_dl->dl) - return; - - dl = otx2_dl->dl; + struct devlink *dl = otx2_dl->dl; + devlink_unregister(dl); devlink_params_unregister(dl, otx2_dl_params, ARRAY_SIZE(otx2_dl_params)); - - devlink_unregister(dl); devlink_free(dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 
dbfa3bc39e34..b0f57bda7e27 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data) otx2_get_qset_strings(pfvf, &data, 0); - for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { - sprintf(data, "cgx_rxstat%d: ", stats); - data += ETH_GSTRING_LEN; - } + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) { + sprintf(data, "cgx_rxstat%d: ", stats); + data += ETH_GSTRING_LEN; + } - for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { - sprintf(data, "cgx_txstat%d: ", stats); - data += ETH_GSTRING_LEN; + for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) { + sprintf(data, "cgx_txstat%d: ", stats); + data += ETH_GSTRING_LEN; + } } strcpy(data, "reset_count"); @@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, [otx2_drv_stats[stat].index]); otx2_get_qset_stats(pfvf, stats, &data); - otx2_update_lmac_stats(pfvf); - for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_rx_stats[stat]; - for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) - *(data++) = pfvf->hw.cgx_tx_stats[stat]; + + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) { + otx2_update_lmac_stats(pfvf); + for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_rx_stats[stat]; + for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++) + *(data++) = pfvf->hw.cgx_tx_stats[stat]; + } + *(data++) = pfvf->reset_count; fec_corr_blks = pfvf->hw.cgx_fec_corr_blks; @@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev, static int otx2_get_sset_count(struct net_device *netdev, int sset) { struct otx2_nic *pfvf = netdev_priv(netdev); - int qstats_count; + int qstats_count, mac_stats = 0; if (sset != ETH_SS_STATS) return -EINVAL; qstats_count = otx2_n_queue_stats * (pfvf->hw.rx_queues + pfvf->hw.tx_queues); + if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) + mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT; otx2_update_lmac_fec_stats(pfvf); return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + - CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT - + 1; + mac_stats + OTX2_FEC_STATS_CNT + 1; } /* Get no of queues device supports and current queue count */ @@ -1340,6 +1347,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_pauseparam = otx2_get_pauseparam, .set_pauseparam = otx2_set_pauseparam, .get_link_ksettings = otx2vf_get_link_ksettings, + .get_ts_info = otx2_get_ts_info, }; void otx2vf_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 53df7fff92c4..f24e920324eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -13,6 +13,8 @@ #include <linux/if_vlan.h> #include <linux/iommu.h> #include <net/ip.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable); static int otx2_change_mtu(struct net_device *netdev, int new_mtu) { + struct otx2_nic *pf = netdev_priv(netdev); bool if_up = netif_running(netdev); int err = 0; + if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) { + netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", + netdev->mtu); + return 
-EINVAL; + } if (if_up) otx2_stop(netdev); @@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) } /* SQ */ - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | @@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf) otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false); /* Free SQB pointers */ otx2_sq_free_sqbs(pf); - for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) { + for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { sq = &qset->sq[qidx]; qmem_free(pf->dev, sq->sqe); qmem_free(pf->dev, sq->tso_hdrs); @@ -1332,7 +1340,7 @@ static int otx2_init_hw_resources(struct otx2_nic *pf) * so, aura count = pool count. */ hw->rqpool_cnt = hw->rx_queues; - hw->sqpool_cnt = hw->tx_queues; + hw->sqpool_cnt = hw->tot_tx_queues; hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt; pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN; @@ -1493,6 +1501,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static void otx2_do_set_rx_mode(struct otx2_nic *pf) +{ + struct net_device *netdev = pf->netdev; + struct nix_rx_mode *req; + bool promisc = false; + + if (!(netdev->flags & IFF_UP)) + return; + + if ((netdev->flags & IFF_PROMISC) || + (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + promisc = true; + } + + /* Write unicast address to mcam entries or del from mcam */ + if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) + __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return; + } + + req->mode = NIX_RX_MODE_UCAST; + + if (promisc) + req->mode |= NIX_RX_MODE_PROMISC; + if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) + req->mode |= NIX_RX_MODE_ALLMULTI; + + req->mode |= NIX_RX_MODE_USE_MCE; + + otx2_sync_mbox_msg(&pf->mbox); + mutex_unlock(&pf->mbox.lock); +} + int otx2_open(struct net_device *netdev) { struct otx2_nic *pf = netdev_priv(netdev); @@ -1503,7 +1549,7 @@ int otx2_open(struct net_device *netdev) netif_carrier_off(netdev); - pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues; + pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues; /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. */ @@ -1523,7 +1569,7 @@ int otx2_open(struct net_device *netdev) if (!qset->cq) goto err_free_mem; - qset->sq = kcalloc(pf->hw.tx_queues, + qset->sq = kcalloc(pf->hw.tot_tx_queues, sizeof(struct otx2_snd_queue), GFP_KERNEL); if (!qset->sq) goto err_free_mem; @@ -1544,11 +1590,20 @@ int otx2_open(struct net_device *netdev) /* RQ0 & SQ0 are mapped to CINT0 and so on.. * 'cq_ids[0]' points to RQ's CQ and * 'cq_ids[1]' points to SQ's CQ and + * 'cq_ids[2]' points to XDP's CQ and */ cq_poll->cq_ids[CQ_RX] = (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ; cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ? qidx + pf->hw.rx_queues : CINT_INVALID_CQ; + if (pf->xdp_prog) + cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ? 
+ (qidx + pf->hw.rx_queues + + pf->hw.tx_queues) : + CINT_INVALID_CQ; + else + cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ; + cq_poll->dev = (void *)pf; netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler, NAPI_POLL_WEIGHT); @@ -1646,6 +1701,8 @@ int otx2_open(struct net_device *netdev) if (err) goto err_tx_stop_queues; + otx2_do_set_rx_mode(pf); + return 0; err_tx_stop_queues: @@ -1791,43 +1848,11 @@ static void otx2_set_rx_mode(struct net_device *netdev) queue_work(pf->otx2_wq, &pf->rx_mode_work); } -static void otx2_do_set_rx_mode(struct work_struct *work) +static void otx2_rx_mode_wrk_handler(struct work_struct *work) { struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work); - struct net_device *netdev = pf->netdev; - struct nix_rx_mode *req; - bool promisc = false; - - if (!(netdev->flags & IFF_UP)) - return; - - if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { - promisc = true; - } - - /* Write unicast address to mcam entries or del from mcam */ - if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT) - __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter); - - mutex_lock(&pf->mbox.lock); - req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox); - if (!req) { - mutex_unlock(&pf->mbox.lock); - return; - } - - req->mode = NIX_RX_MODE_UCAST; - - if (promisc) - req->mode |= NIX_RX_MODE_PROMISC; - if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) - req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; - - otx2_sync_mbox_msg(&pf->mbox); - mutex_unlock(&pf->mbox.lock); + otx2_do_set_rx_mode(pf); } static int otx2_set_features(struct net_device *netdev, @@ -1967,7 +1992,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable) return 0; } -static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) +int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config config; @@ -2023,8 +2048,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } +EXPORT_SYMBOL(otx2_config_hwtstamp); -static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) { struct otx2_nic *pfvf = netdev_priv(netdev); struct hwtstamp_config *cfg = &pfvf->tstamp; @@ -2039,6 +2065,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) return -EOPNOTSUPP; } } +EXPORT_SYMBOL(otx2_ioctl); static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac) { @@ -2281,6 +2308,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, return 0; } +static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, + int qidx) +{ + struct page *page; + u64 dma_addr; + int err = 0; + + dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data), + offset_in_page(xdpf->data), xdpf->len, + DMA_TO_DEVICE); + if (dma_mapping_error(pf->dev, dma_addr)) + return -ENOMEM; + + err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); + if (!err) { + otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); + page = virt_to_page(xdpf->data); + put_page(page); + return -ENOMEM; + } + return 0; +} + +static int otx2_xdp_xmit(struct net_device *netdev, int n, + struct xdp_frame **frames, u32 flags) +{ + struct otx2_nic *pf = netdev_priv(netdev); + int qidx = smp_processor_id(); + struct otx2_snd_queue *sq; + int drops = 0, i; + + if (!netif_running(netdev)) + return -ENETDOWN; + + qidx += pf->hw.tx_queues; + sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL; + + /* Abort xmit if the XDP queue is not set up */ + if (unlikely(!sq)) + return -ENXIO; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + int err; + + err = otx2_xdp_xmit_tx(pf, xdpf, qidx); + if (err) + drops++; + } + return n - drops; +} + +static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) +{ + struct net_device *dev = pf->netdev; + bool if_up = netif_running(pf->netdev); + struct bpf_prog *old_prog; + + if (prog && dev->mtu > MAX_XDP_MTU) { + netdev_warn(dev, "Jumbo frames not yet supported with XDP\n"); + return -EOPNOTSUPP; + } + + if (if_up) + otx2_stop(pf->netdev); + + old_prog = xchg(&pf->xdp_prog, prog); + + if (old_prog) + bpf_prog_put(old_prog); + + if (pf->xdp_prog) + bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1); + + /* Network stack and XDP share the same rx queues. + * Use separate tx queues for XDP and network stack. 
+ */ + if (pf->xdp_prog) + pf->hw.xdp_queues = pf->hw.rx_queues; + else + pf->hw.xdp_queues = 0; + + pf->hw.tot_tx_queues += pf->hw.xdp_queues; + + if (if_up) + otx2_open(pf->netdev); + + return 0; +} + +static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +{ + struct otx2_nic *pf = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return otx2_xdp_setup(pf, xdp->prog); + default: + return -EINVAL; + } +} + static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf, int req_perm) { @@ -2348,6 +2480,8 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, + .ndo_bpf = otx2_xdp, + .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, }; @@ -2358,7 +2492,7 @@ static int otx2_wq_init(struct otx2_nic *pf) if (!pf->otx2_wq) return -ENOMEM; - INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode); + INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler); INIT_WORK(&pf->reset_task, otx2_reset_task); return 0; } @@ -2489,6 +2623,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->pdev = pdev; hw->rx_queues = qcount; hw->tx_queues = qcount; + hw->tot_tx_queues = qcount; hw->max_queues = qcount; num_vec = pci_msix_vec_count(pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index ec9e49985c2c..85b1f140d3dd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) return otx2_sync_mbox_msg(&ptp->nic->mbox); } +static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh) +{ + struct ptp_req *req; + + if (!ptp->nic) + return -ENODEV; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return -ENOMEM; + + req->op = PTP_OP_SET_THRESH; + req->thresh = thresh; + + return otx2_sync_mbox_msg(&ptp->nic->mbox); +} + static u64 ptp_cc_read(const struct cyclecounter *cc) { struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter); @@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc) return rsp->clk; } +static u64 ptp_tstmp_read(struct otx2_ptp *ptp) +{ + struct ptp_req *req; + struct ptp_rsp *rsp; + int err; + + if (!ptp->nic) + return 0; + + req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox); + if (!req) + return 0; + + req->op = PTP_OP_GET_TSTMP; + + err = otx2_sync_mbox_msg(&ptp->nic->mbox); + if (err) + return 0; + + rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0, + &req->hdr); + if (IS_ERR(rsp)) + return 0; + + return rsp->clk; +} + static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) { struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, @@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info, return 0; } +static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + break; + case PTP_PF_PEROUT: + case PTP_PF_PHYSYNC: + return -1; + } + return 0; +} + +static void otx2_ptp_extts_check(struct work_struct *work) +{ + struct otx2_ptp *ptp = container_of(work, struct otx2_ptp, + extts_work.work); + struct ptp_clock_event event; + u64 tstmp, new_thresh; + + mutex_lock(&ptp->nic->mbox.lock); 
+ tstmp = ptp_tstmp_read(ptp); + mutex_unlock(&ptp->nic->mbox.lock); + + if (tstmp != ptp->last_extts) { + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp); + ptp_clock_event(ptp->ptp_clock, &event); + ptp->last_extts = tstmp; + + new_thresh = tstmp % 500000000; + if (ptp->thresh != new_thresh) { + mutex_lock(&ptp->nic->mbox.lock); + ptp_set_thresh(ptp, new_thresh); + mutex_unlock(&ptp->nic->mbox.lock); + ptp->thresh = new_thresh; + } + } + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); +} + static int otx2_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { + struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp, + ptp_info); + int pin; + + if (!ptp->nic) + return -ENODEV; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (pin < 0) + return -EBUSY; + if (on) + schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200)); + else + cancel_delayed_work_sync(&ptp->extts_work); + return 0; + default: + break; + } return -EOPNOTSUPP; } @@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf) struct ptp_req *req; int err; + if (is_otx2_lbkvf(pfvf->pdev)) { + pfvf->ptp = NULL; + return 0; + } + mutex_lock(&pfvf->mbox.lock); /* check if PTP block is available */ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox); @@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf) timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter, ktime_to_ns(ktime_get_real())); + snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP"); + ptp_ptr->extts_config.index = 0; + ptp_ptr->extts_config.func = PTP_PF_NONE; + ptp_ptr->ptp_info = (struct ptp_clock_info) { .owner = THIS_MODULE, .name = "OcteonTX2 PTP", .max_adj = 1000000000ull, - .n_ext_ts = 0, - .n_pins = 0, + .n_ext_ts = 1, + .n_pins = 1, .pps = 0, + .pin_config = &ptp_ptr->extts_config, .adjfine = otx2_ptp_adjfine, .adjtime = otx2_ptp_adjtime, .gettime64 = otx2_ptp_gettime, .settime64 = otx2_ptp_settime, .enable = otx2_ptp_enable, + .verify = otx2_ptp_verify_pin, }; + INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check); + ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev); if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) { err = ptp_ptr->ptp_clock ? 
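The otx2_txrx.c hunk that follows stops walking the CQ ring blindly and instead derives a pending-CQE count from the 64-bit NIX_LF_CQ_OP_STATUS word: the ring tail sits in bits 19:0, the head in bits 39:20, and bits 46/63 (CQ_OP_STAT_CQ_ERR/CQ_OP_STAT_OP_ERR) flag a stopped CQ. A minimal user-space sketch of that head/tail arithmetic, where the pending_cqes() helper, the 256-entry ring and the sample values are illustrative assumptions, not driver code:

#include <stdint.h>
#include <stdio.h>

#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46

/* Return the number of pending CQEs encoded in a CQ_OP_STATUS-style
 * word, or -1 if either error bit is set (the driver bails out there).
 */
static int pending_cqes(uint64_t status, uint32_t cqe_cnt)
{
	uint32_t tail = status & 0xFFFFF;         /* bits 19:0  */
	uint32_t head = (status >> 20) & 0xFFFFF; /* bits 39:20 */

	if (status & ((1ULL << CQ_OP_STAT_OP_ERR) |
		      (1ULL << CQ_OP_STAT_CQ_ERR)))
		return -1;

	/* Tail already wrapped past the ring end: count head..end plus
	 * 0..tail, mirroring (cqe_cnt - cq_head) + cq_tail in the hunk.
	 */
	if (tail < head)
		return (cqe_cnt - head) + tail;

	return tail - head;
}

int main(void)
{
	/* head = 250, tail = 10 on a 256-entry ring: 16 CQEs pending */
	uint64_t status = (250ULL << 20) | 10;

	printf("%d pending\n", pending_cqes(status, 256));
	return 0;
}
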
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index f42b1d4e0c67..2d2b67319340 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -8,6 +8,8 @@ #include <linux/etherdevice.h> #include <net/ip.h> #include <net/tso.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -17,6 +19,35 @@ #include "cn10k.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) +static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq); + +static int otx2_nix_cq_op_status(struct otx2_nic *pfvf, + struct otx2_cq_queue *cq) +{ + u64 incr = (u64)(cq->cq_idx) << 32; + u64 status; + + status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr); + + if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) || + status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) { + dev_err(pfvf->dev, "CQ stopped due to error"); + return -EINVAL; + } + + cq->cq_tail = status & 0xFFFFF; + cq->cq_head = (status >> 20) & 0xFFFFF; + if (cq->cq_tail < cq->cq_head) + cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) + + cq->cq_tail; + else + cq->pend_cqe = cq->cq_tail - cq->cq_head; + + return 0; +} static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq) { @@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg) sg->num_segs = 0; } +static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, + struct otx2_snd_queue *sq, + struct nix_cqe_tx_s *cqe) +{ + struct nix_send_comp_s *snd_comp = &cqe->comp; + struct sg_list *sg; + struct page *page; + u64 pa; + + sg = &sq->sg[snd_comp->sqe_id]; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); + otx2_dma_unmap_page(pfvf, sg->dma_addr[0], + sg->size[0], DMA_TO_DEVICE); + page = virt_to_page(phys_to_virt(pa)); + put_page(page); +} + static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, struct otx2_snd_queue *sq, @@ -285,6 +334,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, return; } + if (pfvf->xdp_prog) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq)) + return; + skb = napi_get_frags(napi); if (unlikely(!skb)) return; @@ -318,7 +371,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe; int processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head); if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID || !cqe->sg.seg_addr) { @@ -334,17 +394,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf, cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; cqe->sg.seg_addr = 0x00; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR, ((u64)cq->cq_idx << 32) | processed_cqe); - if (unlikely(!cq->pool_ptrs)) - return 0; - /* Refill pool with new buffers */ - pfvf->hw_ops->refill_pool_ptrs(pfvf, cq); - return processed_cqe; } @@ -364,22 +420,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { - int tx_pkts = 0, tx_bytes = 0; + int tx_pkts = 0, tx_bytes = 0, qidx; struct nix_cqe_tx_s *cqe; int 
processed_cqe = 0; - while (likely(processed_cqe < budget)) { + if (cq->pend_cqe >= budget) + goto process_cqe; + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return 0; + +process_cqe: + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { if (!processed_cqe) return 0; break; } - otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx], - cqe, budget, &tx_pkts, &tx_bytes); - + if (cq->cq_type == CQ_XDP) { + qidx = cq->cq_idx - pfvf->hw.rx_queues; + otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], + cqe); + } else { + otx2_snd_pkt_handler(pfvf, cq, + &pfvf->qset.sq[cq->cint_idx], + cqe, budget, &tx_pkts, &tx_bytes); + } cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; + cq->pend_cqe--; } /* Free CQEs to HW */ @@ -402,6 +472,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, int otx2_napi_handler(struct napi_struct *napi, int budget) { + struct otx2_cq_queue *rx_cq = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; struct otx2_cq_queue *cq; @@ -412,17 +483,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) pfvf = (struct otx2_nic *)cq_poll->dev; qset = &pfvf->qset; - for (i = CQS_PER_CINT - 1; i >= 0; i--) { + for (i = 0; i < CQS_PER_CINT; i++) { cq_idx = cq_poll->cq_ids[i]; if (unlikely(cq_idx == CINT_INVALID_CQ)) continue; cq = &qset->cq[cq_idx]; if (cq->cq_type == CQ_RX) { - /* If the RQ refill WQ task is running, skip napi - * scheduler for this queue. - */ - if (cq->refill_task_sched) - continue; + rx_cq = cq; workdone += otx2_rx_napi_handler(pfvf, napi, cq, budget); } else { @@ -430,6 +497,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } } + if (rx_cq && rx_cq->pool_ptrs) + pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -936,10 +1005,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) int processed_cqe = 0; u64 iova, pa; - while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) { - if (!cqe->sg.subdc) - continue; + if (pfvf->xdp_prog) + xdp_rxq_info_unreg(&cq->xdp_rxq); + + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; if (cqe->sg.segs > 1) { otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx); continue; @@ -965,7 +1043,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) sq = &pfvf->qset.sq[cq->cint_idx]; - while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) { + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + return; + + while (cq->pend_cqe) { + cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); + processed_cqe++; + cq->pend_cqe--; + + if (!cqe) + continue; sg = &sq->sg[cqe->comp.sqe_id]; skb = (struct sk_buff *)sg->skb; if (skb) { @@ -973,7 +1060,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) dev_kfree_skb_any(skb); sg->skb = (u64)NULL; } - processed_cqe++; } /* Free CQEs to HW */ @@ -1001,3 +1087,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable) mutex_unlock(&pfvf->mbox.lock); return err; } + +static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, + int len, int *offset) +{ + struct nix_sqe_sg_s *sg = NULL; + u64 *iova = NULL; + + sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset); + sg->ld_type = NIX_SEND_LDTYPE_LDD; + sg->subdc = 
NIX_SUBDC_SG; + sg->segs = 1; + sg->seg1_size = len; + iova = (void *)sg + sizeof(*sg); + *iova = dma_addr; + *offset += sizeof(*sg) + sizeof(u64); + + sq->sg[sq->head].dma_addr[0] = dma_addr; + sq->sg[sq->head].size[0] = len; + sq->sg[sq->head].num_segs = 1; +} + +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) +{ + struct nix_sqe_hdr_s *sqe_hdr; + struct otx2_snd_queue *sq; + int offset, free_sqe; + + sq = &pfvf->qset.sq[qidx]; + free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + if (free_sqe < sq->sqe_thresh) + return false; + + memset(sq->sqe_base + 8, 0, sq->sqe_size - 8); + + sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base); + + if (!sqe_hdr->total) { + sqe_hdr->aura = sq->aura_id; + sqe_hdr->df = 1; + sqe_hdr->sq = qidx; + sqe_hdr->pnc = 1; + } + sqe_hdr->total = len; + sqe_hdr->sqe_id = sq->head; + + offset = sizeof(*sqe_hdr); + + otx2_xdp_sqe_add_sg(sq, iova, len, &offset); + sqe_hdr->sizem1 = (offset / 16) - 1; + pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); + + return true; +} + +static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, + struct bpf_prog *prog, + struct nix_cqe_rx_s *cqe, + struct otx2_cq_queue *cq) +{ + unsigned char *hard_start, *data; + int qidx = cq->cq_idx; + struct xdp_buff xdp; + struct page *page; + u64 iova, pa; + u32 act; + int err; + + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + page = virt_to_page(phys_to_virt(pa)); + + xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); + + data = (unsigned char *)phys_to_virt(pa); + hard_start = page_address(page); + xdp_prepare_buff(&xdp, hard_start, data - hard_start, + cqe->sg.seg_size, false); + + act = bpf_prog_run_xdp(prog, &xdp); + + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + qidx += pfvf->hw.tx_queues; + cq->pool_ptrs++; + return otx2_xdp_sq_append_pkt(pfvf, iova, + cqe->sg.seg_size, qidx); + case XDP_REDIRECT: + cq->pool_ptrs++; + err = xdp_do_redirect(pfvf->netdev, &xdp, prog); + + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + if (!err) + return true; + put_page(page); + break; + default: + bpf_warn_invalid_xdp_action(act); + break; + case XDP_ABORTED: + trace_xdp_exception(pfvf->netdev, prog, act); + break; + case XDP_DROP: + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + put_page(page); + cq->pool_ptrs++; + return true; + } + return false; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 3ff1ad79c001..5c05774a8d05 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -11,6 +11,7 @@ #include <linux/etherdevice.h> #include <linux/iommu.h> #include <linux/if_vlan.h> +#include <net/xdp.h> #define LBK_CHAN_BASE 0x000 #define SDP_CHAN_BASE 0x700 @@ -25,6 +26,8 @@ #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 +#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN) + /* Rx buffer size should be in multiples of 128bytes */ #define RCV_FRAG_LEN1(x) \ ((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \ @@ -56,6 +59,9 @@ */ #define CQ_QCOUNT_DEFAULT 1 +#define CQ_OP_STAT_OP_ERR 63 +#define CQ_OP_STAT_CQ_ERR 46 + struct queue_stats { u64 bytes; u64 pkts; @@ -96,7 +102,8 @@ struct otx2_snd_queue { enum cq_type { CQ_RX, CQ_TX, - CQS_PER_CINT = 2, /* RQ + SQ */ + CQ_XDP, + CQS_PER_CINT = 3, /* RQ + SQ + XDP */ }; struct otx2_cq_poll { @@ -122,9 +129,12 @@ struct otx2_cq_queue { u16 pool_ptrs; u32 cqe_cnt; 
u32 cq_head; + u32 cq_tail; + u32 pend_cqe; void *cqe_base; struct qmem *cqe; struct otx2_pool *rbpool; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct otx2_qset { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 03b4ec630432..980219a7e6fe 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -8,9 +8,11 @@ #include <linux/etherdevice.h> #include <linux/module.h> #include <linux/pci.h> +#include <linux/net_tstamp.h> #include "otx2_common.h" #include "otx2_reg.h" +#include "otx2_ptp.h" #include "cn10k.h" #define DRV_NAME "rvu_nicvf" @@ -500,6 +502,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, + .ndo_do_ioctl = otx2_ioctl, }; static int otx2_wq_init(struct otx2_nic *vf) @@ -583,6 +586,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) hw->rx_queues = qcount; hw->tx_queues = qcount; hw->max_queues = qcount; + hw->tot_tx_queues = qcount; hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE, GFP_KERNEL); @@ -640,6 +644,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; + /* Don't check for error. Proceed without ptp */ + otx2_ptp_init(vf); + /* Assign default mac address */ otx2_get_mac_from_af(netdev); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index 68b442eb6d69..06279cd6da67 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = { }, }; -static void prestera_devlink_traps_fini(struct prestera_switch *sw); - static int prestera_drop_counter_get(struct devlink *devlink, const struct devlink_trap *trap, u64 *p_drops); @@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink, enum devlink_trap_action action, struct netlink_ext_ack *extack); -static int prestera_devlink_traps_register(struct prestera_switch *sw); - static const struct devlink_ops prestera_dl_ops = { .info_get = prestera_dl_info_get, .trap_init = prestera_trap_init, @@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw) devlink_free(dl); } -int prestera_devlink_register(struct prestera_switch *sw) +void prestera_devlink_register(struct prestera_switch *sw) { struct devlink *dl = priv_to_devlink(sw); - int err; - - err = devlink_register(dl); - if (err) { - dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); - return err; - } - err = prestera_devlink_traps_register(sw); - if (err) { - devlink_unregister(dl); - dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n", - err); - return err; - } - - return 0; + devlink_register(dl); } void prestera_devlink_unregister(struct prestera_switch *sw) { - struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); - prestera_devlink_traps_fini(sw); devlink_unregister(dl); - - kfree(trap_data->trap_items_arr); - kfree(trap_data); } int prestera_devlink_port_register(struct prestera_port *port) @@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev) return &port->dl_port; } -static int prestera_devlink_traps_register(struct 
prestera_switch *sw) +int prestera_devlink_traps_register(struct prestera_switch *sw) { const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr); const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr); @@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink, cpu_code_type, p_drops); } -static void prestera_devlink_traps_fini(struct prestera_switch *sw) +void prestera_devlink_traps_unregister(struct prestera_switch *sw) { + struct prestera_trap_data *trap_data = sw->trap_data; struct devlink *dl = priv_to_devlink(sw); const struct devlink_trap *trap; int i; @@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw) devlink_trap_groups_unregister(dl, prestera_trap_groups_arr, ARRAY_SIZE(prestera_trap_groups_arr)); + kfree(trap_data->trap_items_arr); + kfree(trap_data); } diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index cc34c3db13a2..b322295bad3a 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -9,7 +9,7 @@ struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); void prestera_devlink_free(struct prestera_switch *sw); -int prestera_devlink_register(struct prestera_switch *sw); +void prestera_devlink_register(struct prestera_switch *sw); void prestera_devlink_unregister(struct prestera_switch *sw); int prestera_devlink_port_register(struct prestera_port *port); @@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev); void prestera_devlink_trap_report(struct prestera_port *port, struct sk_buff *skb, u8 cpu_code); +int prestera_devlink_traps_register(struct prestera_switch *sw); +void prestera_devlink_traps_unregister(struct prestera_switch *sw); #endif /* _PRESTERA_DEVLINK_H_ */ diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 44c670807fb3..78a7a00bce22 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -851,7 +851,7 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_span_init; - err = prestera_devlink_register(sw); + err = prestera_devlink_traps_register(sw); if (err) goto err_dl_register; @@ -863,12 +863,13 @@ static int prestera_switch_init(struct prestera_switch *sw) if (err) goto err_ports_create; + prestera_devlink_register(sw); return 0; err_ports_create: prestera_lag_fini(sw); err_lag_init: - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); err_dl_register: prestera_span_fini(sw); err_span_init: @@ -888,9 +889,10 @@ err_swdev_register: static void prestera_switch_fini(struct prestera_switch *sw) { + prestera_devlink_unregister(sw); prestera_destroy_ports(sw); prestera_lag_fini(sw); - prestera_devlink_unregister(sw); + prestera_devlink_traps_unregister(sw); prestera_span_fini(sw); prestera_acl_fini(sw); prestera_event_handlers_unregister(sw); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index e9fc74e54b22..3cb9c1271328 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = { static struct dentry *sky2_debug; - -/* - * Read and parse the first part of Vital Product Data - */ -#define VPD_SIZE 128 -#define VPD_MAGIC 0x82 - -static const struct 
vpd_tag { - char tag[2]; - char *label; -} vpd_tags[] = { - { "PN", "Part Number" }, - { "EC", "Engineering Level" }, - { "MN", "Manufacturer" }, - { "SN", "Serial Number" }, - { "YA", "Asset Tag" }, - { "VL", "First Error Log Message" }, - { "VF", "Second Error Log Message" }, - { "VB", "Boot Agent ROM Configuration" }, - { "VE", "EFI UNDI Configuration" }, -}; - -static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw) -{ - size_t vpd_size; - loff_t offs; - u8 len; - unsigned char *buf; - u16 reg2; - - reg2 = sky2_pci_read16(hw, PCI_DEV_REG2); - vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); - - seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev)); - buf = kmalloc(vpd_size, GFP_KERNEL); - if (!buf) { - seq_puts(seq, "no memory!\n"); - return; - } - - if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) { - seq_puts(seq, "VPD read failed\n"); - goto out; - } - - if (buf[0] != VPD_MAGIC) { - seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]); - goto out; - } - len = buf[1]; - if (len == 0 || len > vpd_size - 4) { - seq_printf(seq, "Invalid id length: %d\n", len); - goto out; - } - - seq_printf(seq, "%.*s\n", len, buf + 3); - offs = len + 3; - - while (offs < vpd_size - 4) { - int i; - - if (!memcmp("RW", buf + offs, 2)) /* end marker */ - break; - len = buf[offs + 2]; - if (offs + len + 3 >= vpd_size) - break; - - for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) { - if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) { - seq_printf(seq, " %s: %.*s\n", - vpd_tags[i].label, len, buf + offs + 3); - break; - } - } - offs += len + 3; - } -out: - kfree(buf); -} - static int sky2_debug_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; @@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) unsigned idx, last; int sop; - sky2_show_vpd(seq, hw); - - seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n", + seq_printf(seq, "IRQ src=%x mask=%x control=%x\n", sky2_read32(hw, B0_ISRC), sky2_read32(hw, B0_IMSK), sky2_read32(hw, B0_Y2_SP_ICR)); diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index f7053a74e6a8..4d4f9cf9facb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) buf += PAGE_SIZE; } } else { - err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ? + err = copy_to_user((void __user *)buf, init_ents, + array_size(entries, cqe_size)) ? 
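The mlx4 CQ fix above replaces the open-coded entries * cqe_size with array_size(), whose multiplication saturates instead of wrapping, so an overflowing request can never hand copy_to_user() a short byte count. A standalone approximation of the helper (the real one lives in the kernel's <linux/overflow.h>; this userspace version is only a model):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's array_size(): on overflow it
 * saturates to SIZE_MAX, which any later allocator or bounds check
 * rejects, instead of silently wrapping to a small value. */
static size_t array_size(size_t n, size_t size)
{
    size_t bytes;

    if (__builtin_mul_overflow(n, size, &bytes))
        return SIZE_MAX;
    return bytes;
}

int main(void)
{
    printf("%zu\n", array_size(1024, 64));          /* 65536 */
    printf("%zu\n", array_size(SIZE_MAX / 2, 16));  /* SIZE_MAX */
    return 0;
}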
-EFAULT : 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ef518b1040f7..66c8ae29bc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -197,6 +197,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = { /* xdp statistics */ "rx_xdp_drop", + "rx_xdp_redirect", + "rx_xdp_redirect_fail", "rx_xdp_tx", "rx_xdp_tx_full", @@ -428,6 +430,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, data[index++] = priv->rx_ring[i]->bytes; data[index++] = priv->rx_ring[i]->dropped; data[index++] = priv->rx_ring[i]->xdp_drop; + data[index++] = priv->rx_ring[i]->xdp_redirect; + data[index++] = priv->rx_ring[i]->xdp_redirect_fail; data[index++] = priv->rx_ring[i]->xdp_tx; data[index++] = priv->rx_ring[i]->xdp_tx_full; } @@ -520,6 +524,10 @@ static void mlx4_en_get_strings(struct net_device *dev, sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_drop", i); sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_xdp_redirect_fail", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_tx", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_xdp_tx_full", i); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 0158b88bea5b..532997eba698 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete = 0; priv->port_stats.rx_alloc_pages = 0; priv->xdp_stats.rx_xdp_drop = 0; + priv->xdp_stats.rx_xdp_redirect = 0; + priv->xdp_stats.rx_xdp_redirect_fail = 0; priv->xdp_stats.rx_xdp_tx = 0; priv->xdp_stats.rx_xdp_tx_full = 0; for (i = 0; i < priv->rx_ring_num; i++) { @@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete); priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages); priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop); + priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect); + priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail); priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx); priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 7f6d3b82c29b..650e6a1844ae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct bpf_prog *xdp_prog; int cq_ring = cq->ring; bool doorbell_pending; + bool xdp_redir_flush; struct mlx4_cqe *cqe; struct xdp_buff xdp; int polled = 0; @@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp_prog = rcu_dereference_bh(ring->xdp_prog); xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq); doorbell_pending = false; + xdp_redir_flush = false; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of @@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud switch (act) { case XDP_PASS: break; + case XDP_REDIRECT: + if 
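The new rx_xdp_redirect / rx_xdp_redirect_fail counters follow the existing mlx4 scheme: each RX ring bumps a plain per-ring counter from its own NAPI context, and mlx4_en_DUMP_ETH_STATS() folds the rings into port totals through READ_ONCE. A userspace model using C11 relaxed atomics in place of READ_ONCE; ring and stat names are simplified, not the driver's:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_RINGS 4

struct rx_ring {
    /* Written only by the ring's own polling context. */
    atomic_ulong xdp_redirect;
    atomic_ulong xdp_redirect_fail;
};

struct port_stats {
    unsigned long rx_xdp_redirect;
    unsigned long rx_xdp_redirect_fail;
};

/* Fold per-ring counters into the port view; relaxed loads model the
 * kernel's READ_ONCE() against a concurrently writing NAPI context. */
static void fold_stats(struct rx_ring *rings, struct port_stats *s)
{
    s->rx_xdp_redirect = 0;
    s->rx_xdp_redirect_fail = 0;
    for (int i = 0; i < NUM_RINGS; i++) {
        s->rx_xdp_redirect +=
            atomic_load_explicit(&rings[i].xdp_redirect,
                                 memory_order_relaxed);
        s->rx_xdp_redirect_fail +=
            atomic_load_explicit(&rings[i].xdp_redirect_fail,
                                 memory_order_relaxed);
    }
}

int main(void)
{
    struct rx_ring rings[NUM_RINGS] = {0};
    struct port_stats stats;

    atomic_fetch_add(&rings[1].xdp_redirect, 3);
    atomic_fetch_add(&rings[2].xdp_redirect_fail, 1);
    fold_stats(rings, &stats);
    printf("redirect=%lu fail=%lu\n",
           stats.rx_xdp_redirect, stats.rx_xdp_redirect_fail);
    return 0;
}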
(likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) { + ring->xdp_redirect++; + xdp_redir_flush = true; + frags[0].page = NULL; + goto next; + } + ring->xdp_redirect_fail++; + trace_xdp_exception(dev, xdp_prog, act); + goto xdp_drop_no_cnt; case XDP_TX: if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length, cq_ring, @@ -897,6 +909,9 @@ next: break; } + if (xdp_redir_flush) + xdp_do_flush(); + if (likely(polled)) { if (doorbell_pending) { priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 5a6b0fcaf7f8..9541f3a920c8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); - ret = devlink_register(devlink); - if (ret) - goto err_persist_free; ret = devlink_params_register(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); if (ret) @@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_params_unregister; - devlink_params_publish(devlink); - devlink_reload_enable(devlink); pci_save_state(pdev); + devlink_register(devlink); + devlink_reload_enable(devlink); return 0; err_params_unregister: devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); err_devlink_unregister: - devlink_unregister(devlink); -err_persist_free: kfree(dev->persist); err_devlink_free: devlink_free(devlink); @@ -4141,6 +4136,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) int active_vfs = 0; devlink_reload_disable(devlink); + devlink_unregister(devlink); if (mlx4_is_slave(dev)) persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; @@ -4176,7 +4172,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) mlx4_pci_disable_device(dev); devlink_params_unregister(devlink, mlx4_devlink_params, ARRAY_SIZE(mlx4_devlink_params)); - devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 6bf558c5ec10..ad0a8b488832 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -340,6 +340,8 @@ struct mlx4_en_rx_ring { unsigned long csum_complete; unsigned long rx_alloc_pages; unsigned long xdp_drop; + unsigned long xdp_redirect; + unsigned long xdp_redirect_fail; unsigned long xdp_tx; unsigned long xdp_tx_full; unsigned long dropped; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index 7b51ae8cf759..e9cd4bb6f83d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -42,9 +42,11 @@ struct mlx4_en_port_stats { struct mlx4_en_xdp_stats { unsigned long rx_xdp_drop; + unsigned long rx_xdp_redirect; + unsigned long rx_xdp_redirect_fail; unsigned long rx_xdp_tx; unsigned long rx_xdp_tx_full; -#define NUM_XDP_STATS 3 +#define NUM_XDP_STATS 5 }; struct mlx4_en_phy_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index dcf9f27ba2ef..d7576b6fa43b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -625,7 +625,6 @@ static int 
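In the mlx4 RX poll above, a successful xdp_do_redirect() only queues the frame and sets xdp_redir_flush; the single xdp_do_flush() after the CQE loop then kicks everything queued during the budget at once. A toy model of deferring the flush across a poll loop; all names are invented stand-ins for the XDP APIs:

#include <stdbool.h>
#include <stdio.h>

static int queued;

/* Stand-ins for xdp_do_redirect()/xdp_do_flush(): queue now, kick later. */
static bool redirect_one(int pkt)
{
    if (pkt % 3 == 0)   /* pretend some packets miss the redirect map */
        return false;
    queued++;
    return true;
}

static void flush_queued(void)
{
    printf("flushed %d queued frames with one doorbell\n", queued);
    queued = 0;
}

static void napi_poll(int budget)
{
    bool redir_flush = false;

    for (int pkt = 0; pkt < budget; pkt++)
        if (redirect_one(pkt))
            redir_flush = true;   /* remember, but do not flush yet */

    if (redir_flush)
        flush_queued();           /* once per poll, not per packet */
}

int main(void)
{
    napi_poll(8);
    return 0;
}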
mlx5_devlink_eth_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, value); - devlink_param_publish(devlink, &enable_eth_param); return 0; } @@ -636,7 +635,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink) if (!mlx5_eth_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_eth_param); devlink_param_unregister(devlink, &enable_eth_param); } @@ -672,7 +670,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -681,7 +678,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return; - devlink_param_unpublish(devlink, &enable_rdma_param); devlink_param_unregister(devlink, &enable_rdma_param); } @@ -706,7 +702,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink) devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, value); - devlink_param_publish(devlink, &enable_rdma_param); return 0; } @@ -717,7 +712,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink) if (!mlx5_vnet_supported(dev)) return; - devlink_param_unpublish(devlink, &enable_vnet_param); devlink_param_unregister(devlink, &enable_vnet_param); } @@ -799,16 +793,12 @@ int mlx5_devlink_register(struct devlink *devlink) { int err; - err = devlink_register(devlink); - if (err) - return err; - err = devlink_params_register(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); if (err) - goto params_reg_err; + return err; + mlx5_devlink_set_params_init_values(devlink); - devlink_params_publish(devlink); err = mlx5_devlink_auxdev_params_register(devlink); if (err) @@ -825,8 +815,6 @@ traps_reg_err: auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); -params_reg_err: - devlink_unregister(devlink); return err; } @@ -834,8 +822,6 @@ void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); mlx5_devlink_auxdev_params_unregister(devlink); - devlink_params_unpublish(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); - devlink_unregister(devlink); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 7b8c8187543a..f5d147c3141a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -220,8 +220,6 @@ struct mlx5e_umr_wqe { struct mlx5_mtt inline_mtts[0]; }; -extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, MLX5E_PFLAG_TX_CQE_BASED_MODER, @@ -916,6 +914,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index de03684528bb..398c6761eeb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ 
-3,6 +3,7 @@ #include <net/dst_metadata.h> #include <linux/netdevice.h> +#include <linux/if_macvlan.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/rtnetlink.h> @@ -409,6 +410,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv) static LIST_HEAD(mlx5e_block_cb_list); +static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev) +{ + struct macvlan_dev *macvlan = netdev_priv(dev); + + return macvlan->mode == MACVLAN_MODE_PASSTHRU; +} + static int mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct mlx5e_rep_priv *rpriv, @@ -422,8 +430,14 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct flow_block_cb *block_cb; if (!mlx5e_tc_tun_device_to_offload(priv, netdev) && - !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) - return -EOPNOTSUPP; + !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)) { + if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev)) + return -EOPNOTSUPP; + if (!mlx5e_rep_macvlan_mode_supported(netdev)) { + netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode"); + return -EOPNOTSUPP; + } + } if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; @@ -647,9 +661,7 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk "Failed to restore tunnel info for sampled packet\n"); return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_skb(skb, mapped_obj); -#endif /* CONFIG_MLX5_TC_SAMPLE */ mlx5_rep_tc_post_napi_receive(tc_priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 625cd49ef96c..b8b481b335cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, return 0; } -static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) { int err; @@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r if (err) mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", mlx5e_rqt_get_rqtn(&rss->rqt), err); + return err; } void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) @@ -490,6 +491,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, { bool changed_indir = false; bool changed_hash = false; + struct mlx5e_rss *old_rss; + int err = 0; + + old_rss = mlx5e_rss_alloc(); + if (!old_rss) + return -ENOMEM; + + *old_rss = *rss; if (hfunc && *hfunc != rss->hash.hfunc) { switch (*hfunc) { @@ -497,7 +506,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, case ETH_RSS_HASH_TOP: break; default: - return -EINVAL; + err = -EINVAL; + goto out; } changed_hash = true; changed_indir = true; @@ -520,13 +530,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, rss->indir.table[i] = indir[i]; } - if (changed_indir && rss->enabled) - mlx5e_rss_apply(rss, rqns, num_rqns); + if (changed_indir && rss->enabled) { + err = mlx5e_rss_apply(rss, rqns, num_rqns); + if (err) { + *rss = *old_rss; + goto out; + } + } if (changed_hash) mlx5e_rss_update_tirs(rss); - return 0; +out: + mlx5e_rss_free(old_rss); + return err; } struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss) diff --git 
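mlx5e_rss_set_rxfh() above now snapshots the whole RSS object before touching hardware and copies it back if applying the new indirection table fails, so the software state never claims a configuration the device rejected. A compilable sketch of the snapshot-and-rollback shape with deliberately simplified types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rss_state {
    unsigned int hfunc;
    unsigned int indir[8];
};

/* Pretend the device rejects the new table. */
static int hw_apply(const struct rss_state *rss)
{
    (void)rss;
    return -1;
}

static int rss_set(struct rss_state *rss, const unsigned int *indir)
{
    struct rss_state *old = malloc(sizeof(*old));
    int err;

    if (!old)
        return -1;
    *old = *rss;            /* snapshot before mutating */

    memcpy(rss->indir, indir, sizeof(rss->indir));
    err = hw_apply(rss);
    if (err)
        *rss = *old;        /* roll back on failure */

    free(old);
    return err;
}

int main(void)
{
    struct rss_state rss = { .hfunc = 1, .indir = {0, 1, 2, 3, 0, 1, 2, 3} };
    unsigned int bad[8] = {7, 7, 7, 7, 7, 7, 7, 7};

    printf("set -> %d, indir[0]=%u (unchanged)\n",
           rss_set(&rss, bad), rss.indir[0]);
    return 0;
}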
a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h index db0146df9b30..9ef8a49d7801 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h @@ -19,6 +19,8 @@ struct mlx5e_sample_attr { struct mlx5e_sample_flow *sample_flow; }; +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); struct mlx5_flow_handle * @@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act); void mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample); +#else /* CONFIG_MLX5_TC_SAMPLE */ + +static inline struct mlx5_flow_handle * +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + u32 tunnel_id) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr) {} + +static inline struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act) +{ return ERR_PTR(-EOPNOTSUPP); } + +static inline void +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {} + +static inline void +mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {} + +#endif /* CONFIG_MLX5_TC_SAMPLE */ #endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 6c949abcd2e1..225748a9e52a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -2127,12 +2127,20 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); - rhashtable_init(&ct_priv->zone_ht, &zone_params); - rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); - rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params); + if (rhashtable_init(&ct_priv->zone_ht, &zone_params)) + goto err_ct_zone_ht; + if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params)) + goto err_ct_tuples_ht; + if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params)) + goto err_ct_tuples_nat_ht; return ct_priv; +err_ct_tuples_nat_ht: + rhashtable_destroy(&ct_priv->ct_tuples_ht); +err_ct_tuples_ht: + rhashtable_destroy(&ct_priv->zone_ht); +err_ct_zone_ht: err_ct_nat_tbl: mlx5_chains_destroy_global_table(chains, ct_priv->ct); err_ct_tbl: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 306fb5d6a36d..485f208691ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) break; case ETH_SS_TEST: - for (i = 0; i < mlx5e_self_test_num(priv); i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx5e_self_tests[i]); + mlx5e_self_test_fill_strings(priv, data); break; case ETH_SS_STATS: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index ce8ab1f01876..8c9163d2c646 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -35,30 +35,7 @@ #include <net/udp.h> 
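The sample.h hunk above is the standard Kconfig stub-header pattern: when CONFIG_MLX5_TC_SAMPLE is off, static inline stubs returning ERR_PTR(-EOPNOTSUPP) or doing nothing stand in for the real functions, which lets the en_tc.c and rep/tc.c hunks below delete every inline #ifdef at the call sites. A self-contained illustration; FEATURE_SAMPLE is an invented stand-in for the Kconfig symbol, and EOPNOTSUPP comes from the Linux errno set:

#include <errno.h>
#include <stdio.h>

/* Toggle at build time: cc -DFEATURE_SAMPLE=1 stub.c */
#ifndef FEATURE_SAMPLE
#define FEATURE_SAMPLE 0
#endif

#if FEATURE_SAMPLE
static int sample_offload(int flow)
{
    printf("offloading sampled flow %d\n", flow);
    return 0;
}
#else
/* Stub keeps callers free of #ifdefs; it compiles away entirely. */
static inline int sample_offload(int flow)
{
    (void)flow;
    return -EOPNOTSUPP;
}
#endif

int main(void)
{
    /* The caller is identical either way; no conditional compilation. */
    int err = sample_offload(42);

    if (err)
        printf("sampling unsupported (err=%d)\n", err);
    return 0;
}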
#include "en.h" #include "en/port.h" - -enum { - MLX5E_ST_LINK_STATE, - MLX5E_ST_LINK_SPEED, - MLX5E_ST_HEALTH_INFO, -#ifdef CONFIG_INET - MLX5E_ST_LOOPBACK, -#endif - MLX5E_ST_NUM, -}; - -const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = { - "Link Test", - "Speed Test", - "Health Test", -#ifdef CONFIG_INET - "Loopback Test", -#endif -}; - -int mlx5e_self_test_num(struct mlx5e_priv *priv) -{ - return ARRAY_SIZE(mlx5e_self_tests); -} +#include "eswitch.h" static int mlx5e_test_health_info(struct mlx5e_priv *priv) { @@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, mlx5e_refresh_tirs(priv, false, false); } +static int mlx5e_cond_loopback(struct mlx5e_priv *priv) +{ + if (is_mdev_switchdev_mode(priv->mdev)) + return -EOPNOTSUPP; + + return 0; +} + #define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200)) static int mlx5e_test_loopback(struct mlx5e_priv *priv) { @@ -313,37 +298,47 @@ out: } #endif -static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = { - mlx5e_test_link_state, - mlx5e_test_link_speed, - mlx5e_test_health_info, +typedef int (*mlx5e_st_func)(struct mlx5e_priv *); + +struct mlx5e_st { + char name[ETH_GSTRING_LEN]; + mlx5e_st_func st_func; + mlx5e_st_func cond_func; +}; + +static struct mlx5e_st mlx5e_sts[] = { + { "Link Test", mlx5e_test_link_state }, + { "Speed Test", mlx5e_test_link_speed }, + { "Health Test", mlx5e_test_health_info }, #ifdef CONFIG_INET - mlx5e_test_loopback, + { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback }, #endif }; +#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts) + void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) { struct mlx5e_priv *priv = netdev_priv(ndev); - int i; - - memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM); + int i, count = 0; mutex_lock(&priv->state_lock); netdev_info(ndev, "Self test begin..\n"); for (i = 0; i < MLX5E_ST_NUM; i++) { - netdev_info(ndev, "\t[%d] %s start..\n", - i, mlx5e_self_tests[i]); - buf[i] = mlx5e_st_func[i](priv); - netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", - i, mlx5e_self_tests[i], buf[i]); + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + netdev_info(ndev, "\t[%d] %s start..\n", i, st.name); + buf[count] = st.st_func(priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]); } mutex_unlock(&priv->state_lock); - for (i = 0; i < MLX5E_ST_NUM; i++) { + for (i = 0; i < count; i++) { if (buf[i]) { etest->flags |= ETH_TEST_FL_FAILED; break; @@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, netdev_info(ndev, "Self test out: status flags(0x%x)\n", etest->flags); } + +int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data) +{ + int i, count = 0; + + for (i = 0; i < MLX5E_ST_NUM; i++) { + struct mlx5e_st st = mlx5e_sts[i]; + + if (st.cond_func && st.cond_func(priv)) + continue; + if (data) + strcpy(data + count * ETH_GSTRING_LEN, st.name); + count++; + } + return count; +} + +int mlx5e_self_test_num(struct mlx5e_priv *priv) +{ + return mlx5e_self_test_fill_strings(priv, NULL); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ba8164792016..0e03cefc5eeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -39,6 +39,7 @@ #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> +#include <linux/if_macvlan.h> #include 
<net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> #include <net/psample.h> @@ -246,7 +247,6 @@ get_ct_priv(struct mlx5e_priv *priv) return priv->fs.tc.ct; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) static struct mlx5e_tc_psample * get_sample_priv(struct mlx5e_priv *priv) { @@ -263,7 +263,6 @@ get_sample_priv(struct mlx5e_priv *priv) return NULL; } -#endif struct mlx5_flow_handle * mlx5_tc_rule_insert(struct mlx5e_priv *priv, @@ -1146,11 +1145,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv), flow, spec, attr, mod_hdr_acts); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) } else if (flow_flag_test(flow, SAMPLE)) { rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr, mlx5e_tc_get_flow_tun_id(flow)); -#endif } else { rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); } @@ -1186,12 +1183,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, return; } -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) if (flow_flag_test(flow, SAMPLE)) { mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); return; } -#endif if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); @@ -1688,8 +1683,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv, if (opt->opt_class != htons(U16_MAX) || opt->type != U8_MAX) { - NL_SET_ERR_MSG(extack, - "Partial match of tunnel options in chain > 0 isn't supported"); + NL_SET_ERR_MSG_MOD(extack, + "Partial match of tunnel options in chain > 0 isn't supported"); netdev_warn(priv->netdev, "Partial match of tunnel options in chain > 0 isn't supported"); return -EOPNOTSUPP; @@ -1905,8 +1900,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, if ((needs_mapping || sets_mapping) && !mlx5_eswitch_reg_c1_loopback_enabled(esw)) { - NL_SET_ERR_MSG(extack, - "Chains on tunnel devices isn't supported without register loopback support"); + NL_SET_ERR_MSG_MOD(extack, + "Chains on tunnel devices isn't supported without register loopback support"); netdev_warn(priv->netdev, "Chains on tunnel devices isn't supported without register loopback support"); return -EOPNOTSUPP; @@ -2910,8 +2905,7 @@ out_err: } static int -parse_pedit_to_reformat(struct mlx5e_priv *priv, - const struct flow_action_entry *act, +parse_pedit_to_reformat(const struct flow_action_entry *act, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { @@ -2943,7 +2937,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, struct netlink_ext_ack *extack) { if (flow && flow_flag_test(flow, L3_TO_L2_DECAP)) - return parse_pedit_to_reformat(priv, act, parse_attr, extack); + return parse_pedit_to_reformat(act, parse_attr, extack); return parse_pedit_to_modify_hdr(priv, act, namespace, parse_attr, hdrs, extack); @@ -3025,10 +3019,10 @@ struct ipv6_hoplimit_word { __u8 hop_limit; }; -static int is_action_keys_supported(const struct flow_action_entry *act, - bool ct_flow, bool *modify_ip_header, - bool *modify_tuple, - struct netlink_ext_ack *extack) +static bool +is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow, + bool *modify_ip_header, bool *modify_tuple, + struct netlink_ext_ack *extack) { u32 mask, offset; u8 htype; @@ -3056,7 +3050,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv4 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { struct 
ipv6_hoplimit_word *hoplimit_word = @@ -3074,7 +3068,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv6 address with action ct"); - return -EOPNOTSUPP; + return false; } } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) { @@ -3082,11 +3076,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act, if (ct_flow) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of transport header ports with action ct"); - return -EOPNOTSUPP; + return false; } } - return 0; + return true; } static bool modify_tuple_supported(bool modify_tuple, bool ct_clear, @@ -3133,7 +3127,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, void *headers_v; u16 ethertype; u8 ip_proto; - int i, err; + int i; headers_c = get_match_headers_criteria(actions, spec); headers_v = get_match_headers_value(actions, spec); @@ -3151,11 +3145,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, act->id != FLOW_ACTION_ADD) continue; - err = is_action_keys_supported(act, ct_flow, - &modify_ip_header, - &modify_tuple, extack); - if (err) - return err; + if (!is_action_keys_supported(act, ct_flow, + &modify_ip_header, + &modify_tuple, extack)) + return false; } if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack, @@ -3190,6 +3183,12 @@ static bool actions_match_supported(struct mlx5e_priv *priv, ct_flow = flow_flag_test(flow, CT) && !ct_clear; actions = flow->attr->action; + if (!(actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); + return false; + } + if (mlx5e_is_eswitch_flow(flow)) { if (flow->attr->esw_attr->split_count && ct_flow && !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) { @@ -3451,7 +3450,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -3484,17 +3484,11 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, attr->action = action; - if (attr->dest_chain) { - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); - return -EOPNOTSUPP; - } - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (attr->dest_chain && parse_attr->mirred_ifindex[0]) { + NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported"); + return -EOPNOTSUPP; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; @@ -3914,6 +3908,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; } + if (netif_is_macvlan(out_dev)) + out_dev = macvlan_dev_real_dev(out_dev); + err = verify_uplink_forwarding(priv, flow, out_dev, extack); if (err) return err; @@ -3998,7 +3995,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if (err) return err; - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = act->chain_index; break; case FLOW_ACTION_CT: @@ -4068,30 +4066,17 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, if 
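With the goto-chain parsers above now setting FWD_DEST alongside COUNT at parse time, the "must forward or drop" rule can live in one place, actions_match_supported(), shared by the NIC and FDB paths. A minimal model of validating a parsed action bitmask; the flag values are invented, only the shape mirrors the driver:

#include <stdbool.h>
#include <stdio.h>

#define ACT_FWD_DEST (1u << 0)
#define ACT_DROP     (1u << 1)
#define ACT_COUNT    (1u << 2)

/* Shared post-parse check: every accepted rule must terminate the
 * packet somewhere, either forwarding it or dropping it. */
static bool actions_match_supported(unsigned int actions)
{
    if (!(actions & (ACT_FWD_DEST | ACT_DROP))) {
        fprintf(stderr,
                "Rule must have at least one forward/drop action\n");
        return false;
    }
    return true;
}

int main(void)
{
    unsigned int goto_chain_rule = ACT_FWD_DEST | ACT_COUNT;
    unsigned int count_only_rule = ACT_COUNT;

    printf("%d %d\n", actions_match_supported(goto_chain_rule),
           actions_match_supported(count_only_rule));
    return 0;
}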
(!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; - if (attr->dest_chain) { - if (decap) { - /* It can be supported if we'll create a mapping for - * the tunnel device only (without tunnel), and set - * this tunnel id with this decap flow. - * - * On restore (miss), we'll just set this saved tunnel - * device. - */ - - NL_SET_ERR_MSG(extack, - "Decap with goto isn't supported"); - netdev_warn(priv->netdev, - "Decap with goto isn't supported"); - return -EOPNOTSUPP; - } - - attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - } + if (attr->dest_chain && decap) { + /* It can be supported if we'll create a mapping for + * the tunnel device only (without tunnel), and set + * this tunnel id with this decap flow. + * + * On restore (miss), we'll just set this saved tunnel + * device. + */ - if (!(attr->action & - (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { - NL_SET_ERR_MSG_MOD(extack, - "Rule must have at least one forward/drop action"); + NL_SET_ERR_MSG(extack, "Decap with goto isn't supported"); + netdev_warn(priv->netdev, "Decap with goto isn't supported"); return -EOPNOTSUPP; } @@ -5006,9 +4991,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) MLX5_FLOW_NAMESPACE_FDB, uplink_priv->post_act); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); -#endif mapping_id = mlx5_query_nic_system_image_guid(esw->dev); @@ -5052,9 +5035,7 @@ err_ht_init: err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); @@ -5074,9 +5055,7 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); mapping_destroy(uplink_priv->tunnel_mapping); -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); -#endif mlx5_tc_ct_clean(uplink_priv->ct_priv); mlx5e_tc_post_act_destroy(uplink_priv->post_act); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 79482824c64f..92b08fa07efa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1537,6 +1537,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err); pci_save_state(pdev); + devlink_register(devlink); if (!mlx5_core_is_mp_slave(dev)) devlink_reload_enable(devlink); return 0; @@ -1559,6 +1560,7 @@ static void remove_one(struct pci_dev *pdev) struct devlink *devlink = priv_to_devlink(dev); devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_crdump_disable(dev); mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 052f48068dc1..3cf272fa2164 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -46,6 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err); goto init_one_err; } + devlink_register(devlink); devlink_reload_enable(devlink); return 0; @@ -65,6 +66,7 @@ static void 
mlx5_sf_dev_remove(struct auxiliary_device *adev) devlink = priv_to_devlink(sf_dev->mdev); devlink_reload_disable(devlink); + devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); mlx5_mdev_uninit(sf_dev->mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index b2481c99da79..33e6299026f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -1791,7 +1791,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value, else return -EINVAL; - misc->source_eswitch_owner_vhca_id = 0; + misc->source_eswitch_owner_vhca_id = 0; } else { caps = &dmn->info.caps; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f080fab3de2b..9e831e8b607a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -90,7 +90,6 @@ struct mlxsw_core { struct devlink_health_reporter *fw_fatal; } health; struct mlxsw_env *env; - bool is_initialized; /* Denotes if core was already initialized. */ unsigned long driver_priv[]; /* driver_priv has to be always the last item */ }; @@ -1975,12 +1974,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_emad_init; if (!reload) { - err = devlink_register(devlink); - if (err) - goto err_devlink_register; - } - - if (!reload) { err = mlxsw_core_params_register(mlxsw_core); if (err) goto err_register_params; @@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_health_init; - if (mlxsw_driver->init) { - err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); - if (err) - goto err_driver_init; - } - err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); if (err) goto err_hwmon_init; @@ -2014,31 +2001,32 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_env_init; - mlxsw_core->is_initialized = true; - devlink_params_publish(devlink); + if (mlxsw_driver->init) { + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack); + if (err) + goto err_driver_init; + } - if (!reload) + if (!reload) { + devlink_register(devlink); devlink_reload_enable(devlink); + } return 0; +err_driver_init: + mlxsw_env_fini(mlxsw_core->env); err_env_init: mlxsw_thermal_fini(mlxsw_core->thermal); err_thermal_init: mlxsw_hwmon_fini(mlxsw_core->hwmon); err_hwmon_init: - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); -err_driver_init: mlxsw_core_health_fini(mlxsw_core); err_health_init: err_fw_rev_validate: if (!reload) mlxsw_core_params_unregister(mlxsw_core); err_register_params: - if (!reload) - devlink_unregister(devlink); -err_devlink_register: mlxsw_emad_fini(mlxsw_core); err_emad_init: kfree(mlxsw_core->lag.mapping); @@ -2087,8 +2075,10 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (!reload) + if (!reload) { devlink_reload_disable(devlink); + devlink_unregister(devlink); + } if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the @@ -2099,18 +2089,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } - devlink_params_unpublish(devlink); - mlxsw_core->is_initialized = false; + if (mlxsw_core->driver->fini) 
+ mlxsw_core->driver->fini(mlxsw_core); mlxsw_env_fini(mlxsw_core->env); mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); - if (mlxsw_core->driver->fini) - mlxsw_core->driver->fini(mlxsw_core); mlxsw_core_health_fini(mlxsw_core); if (!reload) mlxsw_core_params_unregister(mlxsw_core); - if (!reload) - devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); kfree(mlxsw_core->lag.mapping); mlxsw_ports_fini(mlxsw_core, reload); @@ -2124,7 +2110,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, reload_fail_deinit: mlxsw_core_params_unregister(mlxsw_core); - devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } @@ -2939,49 +2924,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core) return mlxsw_core->env; } -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core) -{ - return mlxsw_core->is_initialized; -} - -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module) -{ - enum mlxsw_reg_pmtm_module_type module_type; - char pmtm_pl[MLXSW_REG_PMTM_LEN]; - int err; - - mlxsw_reg_pmtm_pack(pmtm_pl, module); - err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl); - if (err) - return err; - mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type); - - /* Here we need to get the module width according to the module type. */ - - switch (module_type) { - case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_OSFP: - return 8; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X: - case MLXSW_REG_PMTM_MODULE_TYPE_QSFP: - return 4; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD: - case MLXSW_REG_PMTM_MODULE_TYPE_DSFP: - return 2; - case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X: - case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X: - case MLXSW_REG_PMTM_MODULE_TYPE_SFP: - return 1; - default: - return -EINVAL; - } -} -EXPORT_SYMBOL(mlxsw_core_module_max_width); - static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, const char *buf, size_t size) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 80712dc803d0..12023a550007 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core, u8 local_port); bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port); struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core); -bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core); -int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module); int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 3713c45cfa1e..9e367174743d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/ethtool.h> #include <linux/sfp.h> +#include <linux/mutex.h> #include "core.h" #include "core_env.h" @@ -14,12 +15,14 @@ struct mlxsw_env_module_info { u64 module_overheat_counter; bool is_overheat; + int num_ports_mapped; + int num_ports_up; }; struct mlxsw_env { struct mlxsw_core *core; u8 module_count; - 
spinlock_t module_info_lock; /* Protects 'module_info'. */ + struct mutex module_info_lock; /* Protects 'module_info'. */ struct mlxsw_env_module_info module_info[]; }; @@ -389,6 +392,59 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); +static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module) +{ + char pmaos_pl[MLXSW_REG_PMAOS_LEN]; + + mlxsw_reg_pmaos_pack(pmaos_pl, module); + mlxsw_reg_pmaos_rst_set(pmaos_pl, true); + + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); +} + +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, u32 *flags) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + u32 req = *flags; + int err; + + if (!(req & ETH_RESET_PHY) && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) + return 0; + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + + if (mlxsw_env->module_info[module].num_ports_up) { + netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n"); + err = -EINVAL; + goto out; + } + + if (mlxsw_env->module_info[module].num_ports_mapped > 1 && + !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) { + netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n"); + err = -EINVAL; + goto out; + } + + err = mlxsw_env_module_reset(mlxsw_core, module); + if (err) { + netdev_err(netdev, "Failed to reset module\n"); + goto out; + } + + *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)); + +out: + mutex_unlock(&mlxsw_env->module_info_lock); + return err; +} +EXPORT_SYMBOL(mlxsw_env_reset_module); + static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core, u8 module, bool *p_has_temp_sensor) @@ -482,22 +538,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core, return 0; } -static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, - char *mtwe_pl, void *priv) +struct mlxsw_env_module_temp_warn_event { + struct mlxsw_env *mlxsw_env; + char mtwe_pl[MLXSW_REG_MTWE_LEN]; + struct work_struct work; +}; + +static void mlxsw_env_mtwe_event_work(struct work_struct *work) { - struct mlxsw_env *mlxsw_env = priv; + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env; int i, sensor_warning; bool is_overheat; + event = container_of(work, struct mlxsw_env_module_temp_warn_event, + work); + mlxsw_env = event->mlxsw_env; + for (i = 0; i < mlxsw_env->module_count; i++) { /* 64-127 of sensor_index are mapped to the port modules * sequentially (module 0 is mapped to sensor_index 64, * module 1 to sensor_index 65 and so on) */ sensor_warning = - mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl, + mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl, i + MLXSW_REG_MTMP_MODULE_INDEX_MIN); - spin_lock(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); is_overheat = mlxsw_env->module_info[i].is_overheat; @@ -507,13 +573,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, * warning OR current state in "no warning" and MTWE * does not report warning. */ - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); continue; } else if (is_overheat && !sensor_warning) { /* MTWE reports "no warning", turn is_overheat off. 
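module_info_lock becomes a mutex above because everything that takes it now runs in process context, and the new num_ports_mapped / num_ports_up counters gate mlxsw_env_reset_module(): reset is refused while any port on the module is administratively up, and a module shared by several ports additionally requires the phy-shared reset flag. A pthread-based model of the refcount gating, simplified to a single counter (build with -pthread; all names invented):

#include <pthread.h>
#include <stdio.h>

struct module_info {
    pthread_mutex_t lock;
    int num_ports_mapped;
    int num_ports_up;
};

/* Reset is only legal while no port on the module is up; a mutex is
 * fine here because every caller runs in ordinary thread context. */
static int module_reset(struct module_info *m)
{
    int err = 0;

    pthread_mutex_lock(&m->lock);
    if (m->num_ports_up) {
        fprintf(stderr, "cannot reset: %d port(s) up\n", m->num_ports_up);
        err = -1;
    } else {
        printf("module reset\n");
    }
    pthread_mutex_unlock(&m->lock);
    return err;
}

static void port_up(struct module_info *m)
{
    pthread_mutex_lock(&m->lock);
    m->num_ports_up++;
    pthread_mutex_unlock(&m->lock);
}

static void port_down(struct module_info *m)
{
    pthread_mutex_lock(&m->lock);
    m->num_ports_up--;
    pthread_mutex_unlock(&m->lock);
}

int main(void)
{
    struct module_info m = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

    port_up(&m);
    module_reset(&m);          /* refused */
    port_down(&m);
    return module_reset(&m);   /* succeeds, returns 0 */
}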
*/ mlxsw_env->module_info[i].is_overheat = false; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } else { /* Current state is "no warning" and MTWE reports * "warning", increase the counter and turn is_overheat @@ -521,13 +587,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg, */ mlxsw_env->module_info[i].is_overheat = true; mlxsw_env->module_info[i].module_overheat_counter++; - spin_unlock(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); } } + + kfree(event); +} + +static void +mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl, + void *priv) +{ + struct mlxsw_env_module_temp_warn_event *event; + struct mlxsw_env *mlxsw_env = priv; + + event = kmalloc(sizeof(*event), GFP_ATOMIC); + if (!event) + return; + + event->mlxsw_env = mlxsw_env; + memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN); + INIT_WORK(&event->work, mlxsw_env_mtwe_event_work); + mlxsw_core_schedule_work(&event->work); } static const struct mlxsw_listener mlxsw_env_temp_warn_listener = - MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE); + MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE); static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core) { @@ -568,9 +653,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work) work); mlxsw_env = event->mlxsw_env; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); mlxsw_env->module_info[event->module].is_overheat = false; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module, &has_temp_sensor); @@ -652,8 +737,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core, for (i = 0; i < module_count; i++) { char pmaos_pl[MLXSW_REG_PMAOS_LEN]; - mlxsw_reg_pmaos_pack(pmaos_pl, i, - MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_pack(pmaos_pl, i); + mlxsw_reg_pmaos_e_set(pmaos_pl, + MLXSW_REG_PMAOS_E_GENERATE_EVENT); + mlxsw_reg_pmaos_ee_set(pmaos_pl, true); err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl); if (err) return err; @@ -667,23 +754,71 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, { struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); - /* Prevent switch driver from accessing uninitialized data. 
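The MTWE handler above runs in atomic trap context, so it only copies the register payload into a GFP_ATOMIC allocation and schedules a work item; the mutex is taken in the work function, where sleeping is allowed. A userspace analogue in which a thread stands in for the workqueue (build with -pthread; names invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAYLOAD_LEN 16

struct temp_event {
    char payload[PAYLOAD_LEN];
};

/* Worker: runs in ordinary thread context, may sleep or take mutexes. */
static void *event_work(void *arg)
{
    struct temp_event *ev = arg;

    printf("processing event: %s\n", ev->payload);
    free(ev);
    return NULL;
}

/* "Atomic" handler: must not block, so it only snapshots the payload
 * and hands it off, mirroring the GFP_ATOMIC kmalloc + schedule_work. */
static void trap_handler(const char payload[PAYLOAD_LEN])
{
    struct temp_event *ev = malloc(sizeof(*ev));
    pthread_t worker;

    if (!ev)
        return;  /* drop the event rather than block */
    memcpy(ev->payload, payload, PAYLOAD_LEN);
    pthread_create(&worker, NULL, event_work, ev);
    pthread_join(worker, NULL);  /* stand-in for the workqueue */
}

int main(void)
{
    char mtwe_pl[PAYLOAD_LEN] = "overheat-warn";

    trap_handler(mtwe_pl);
    return 0;
}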
*/ - if (!mlxsw_core_is_initialized(mlxsw_core)) { - *p_counter = 0; - return 0; - } - if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) return -EINVAL; - spin_lock_bh(&mlxsw_env->module_info_lock); + mutex_lock(&mlxsw_env->module_info_lock); *p_counter = mlxsw_env->module_info[module].module_overheat_counter; - spin_unlock_bh(&mlxsw_env->module_info_lock); + mutex_unlock(&mlxsw_env->module_info_lock); return 0; } EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get); +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped++; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_map); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_mapped--; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_unmap); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return -EINVAL; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_up++; + mutex_unlock(&mlxsw_env->module_info_lock); + + return 0; +} +EXPORT_SYMBOL(mlxsw_env_module_port_up); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + + if (WARN_ON_ONCE(module >= mlxsw_env->module_count)) + return; + + mutex_lock(&mlxsw_env->module_info_lock); + mlxsw_env->module_info[module].num_ports_up--; + mutex_unlock(&mlxsw_env->module_info_lock); +} +EXPORT_SYMBOL(mlxsw_env_module_port_down); + int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) { char mgpir_pl[MLXSW_REG_MGPIR_LEN]; @@ -702,7 +837,7 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env) if (!env) return -ENOMEM; - spin_lock_init(&env->module_info_lock); + mutex_init(&env->module_info_lock); env->core = mlxsw_core; env->module_count = module_count; *p_env = env; @@ -732,6 +867,7 @@ err_oper_state_event_enable: err_module_plug_event_register: mlxsw_env_temp_warn_event_unregister(env); err_temp_warn_event_register: + mutex_destroy(&env->module_info_lock); kfree(env); return err; } @@ -742,5 +878,6 @@ void mlxsw_env_fini(struct mlxsw_env *env) /* Make sure there is no more event work scheduled. 
*/ mlxsw_core_flush_owq(); mlxsw_env_temp_warn_event_unregister(env); + mutex_destroy(&env->module_info_lock); kfree(env); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index 0bf5bd0f8a7e..c486397f5dfe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -24,9 +24,22 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); +int mlxsw_env_reset_module(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, u8 module, + u32 *flags); + int mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module, u64 *p_counter); + +void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module); + +int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module); + +void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module); + int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env); void mlxsw_env_fini(struct mlxsw_env *env); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index d9d56c44e994..9644e9c486b8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m) return 0; } -static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) +static int mlxsw_m_port_open(struct net_device *dev) { + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module); +} + +static int mlxsw_m_port_stop(struct net_device *dev) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module); return 0; } @@ -70,8 +82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev) } static const struct net_device_ops mlxsw_m_port_netdev_ops = { - .ndo_open = mlxsw_m_port_dummy_open_stop, - .ndo_stop = mlxsw_m_port_dummy_open_stop, + .ndo_open = mlxsw_m_port_open, + .ndo_stop = mlxsw_m_port_stop, .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port, }; @@ -124,11 +136,21 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, page, extack); } +static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module, + flags); +} + static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { .get_drvinfo = mlxsw_m_module_get_drvinfo, .get_module_info = mlxsw_m_get_module_info, .get_module_eeprom = mlxsw_m_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page, + .reset = mlxsw_m_reset, }; static int @@ -266,6 +288,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, if (WARN_ON_ONCE(module >= max_ports)) return -EINVAL; + mlxsw_env_module_port_map(mlxsw_m->core, module); mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; return 0; @@ -274,6 +297,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) { 
mlxsw_m->module_to_port[module] = -1; + mlxsw_env_module_port_unmap(mlxsw_m->core, module); } static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6fbda6ebd590..c5fad3c94fac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5681,6 +5681,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port) MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN); +/* reg_pmaos_rst + * Module reset toggle. + * Note: Setting reset while module is plugged-in will result in transition to + * "initializing" operational state. + * Access: OP + */ +MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1); + /* reg_pmaos_slot_index * Slot index. * Access: Index @@ -5693,6 +5701,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4); */ MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8); +enum mlxsw_reg_pmaos_admin_status { + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1, + MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2, + /* If the module is active and then unplugged, or experienced an error + * event, the operational status should go to "disabled" and can only + * be enabled upon explicit enable command. + */ + MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3, +}; + +/* reg_pmaos_admin_status + * Module administrative state (the desired state of the module). + * Note: To disable a module, all ports associated with the port must be + * administatively down first. + * Access: RW + */ +MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4); + /* reg_pmaos_ase * Admin state update enable. * If this bit is set, admin state will be updated based on admin_state field. @@ -5721,13 +5747,10 @@ enum mlxsw_reg_pmaos_e { */ MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2); -static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module, - enum mlxsw_reg_pmaos_e e) +static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module) { MLXSW_REG_ZERO(pmaos, payload); mlxsw_reg_pmaos_module_set(payload, module); - mlxsw_reg_pmaos_e_set(payload, e); - mlxsw_reg_pmaos_ee_set(payload, true); } /* PPLR - Port Physical Loopback Register @@ -5766,6 +5789,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0); } +/* PMTDB - Port Module To local DataBase Register + * ---------------------------------------------- + * The PMTDB register allows to query the possible module<->local port + * mapping than can be used in PMLP. It does not represent the actual/current + * mapping of the local to module. Actual mapping is only defined by PMLP. + */ +#define MLXSW_REG_PMTDB_ID 0x501A +#define MLXSW_REG_PMTDB_LEN 0x40 + +MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN); + +/* reg_pmtdb_slot_index + * Slot index (0: Main board). + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4); + +/* reg_pmtdb_module + * Module number. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8); + +/* reg_pmtdb_ports_width + * Port's width + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4); + +/* reg_pmtdb_num_ports + * Number of ports in a single module (split/breakout) + * Access: Index + */ +MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4); + +enum mlxsw_reg_pmtdb_status { + MLXSW_REG_PMTDB_STATUS_SUCCESS, +}; + +/* reg_pmtdb_status + * Status + * Access: RO + */ +MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4); + +/* reg_pmtdb_port_num + * The local_port value which can be assigned to the module. + * In case of more than one port, port<x> represent the /<x> port of + * the module. + * Access: RO + */ +MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false); + +static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module, + u8 ports_width, u8 num_ports) +{ + MLXSW_REG_ZERO(pmtdb, payload); + mlxsw_reg_pmtdb_slot_index_set(payload, slot_index); + mlxsw_reg_pmtdb_module_set(payload, module); + mlxsw_reg_pmtdb_ports_width_set(payload, ports_width); + mlxsw_reg_pmtdb_num_ports_set(payload, num_ports); +} + /* PMPE - Port Module Plug/Unplug Event Register * --------------------------------------------- * This register reports any operational status change of a module. @@ -5860,67 +5946,51 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, mlxsw_reg_pddr_page_select_set(payload, page_select); } -/* PMTM - Port Module Type Mapping Register - * ---------------------------------------- - * The PMTM allows query or configuration of module types. +/* PLLP - Port Local port to Label Port mapping Register + * ----------------------------------------------------- + * The PLLP register returns the mapping from Local Port into Label Port. */ -#define MLXSW_REG_PMTM_ID 0x5067 -#define MLXSW_REG_PMTM_LEN 0x10 +#define MLXSW_REG_PLLP_ID 0x504A +#define MLXSW_REG_PLLP_LEN 0x10 -MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN); +MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN); -/* reg_pmtm_module - * Module number. +/* reg_pllp_local_port + * Local port number. * Access: Index */ -MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8); +MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8); -enum mlxsw_reg_pmtm_module_type { - /* Backplane with 4 lanes */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_4X, - /* QSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP, - /* SFP */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP, - /* Backplane with single lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4, - /* Backplane with two lane */ - MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8, - /* Chip2Chip4x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10, - /* Chip2Chip2x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C2X, - /* Chip2Chip1x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C1X, - /* QSFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14, - /* OSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_OSFP, - /* SFP-DD */ - MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD, - /* DSFP */ - MLXSW_REG_PMTM_MODULE_TYPE_DSFP, - /* Chip2Chip8x */ - MLXSW_REG_PMTM_MODULE_TYPE_C2C8X, -}; +/* reg_pllp_label_port + * Front panel label of the port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8); -/* reg_pmtm_module_type - * Module type. - * Access: RW +/* reg_pllp_split_num + * Label split mapping for local_port. + * Access: RO + */ +MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4); + +/* reg_pllp_slot_index + * Slot index (0: Main board). 
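PMTDB and PLLP above are described entirely through MLXSW_ITEM32()/MLXSW_ITEM16_INDEXED() generators, each of which expands to typed get/set accessors over a fixed (byte offset, bit shift, width) slot in the register payload. A standalone model of such an accessor, exercised on reg_pmtdb_module's slot (dword 0x00, bits 23:16); __builtin_bswap32 is a GCC/Clang builtin and a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Model of an MLXSW_ITEM32-style accessor: a named field living at
 * (byte offset, bit shift, bit width) inside a register payload. */
static uint32_t item32_get(const uint8_t *payload, int off, int shift,
                           int width)
{
    uint32_t dword;

    memcpy(&dword, payload + off, sizeof(dword));
    dword = __builtin_bswap32(dword);  /* register payloads are big-endian */
    return (dword >> shift) & ((1u << width) - 1);
}

static void item32_set(uint8_t *payload, int off, int shift, int width,
                       uint32_t val)
{
    uint32_t dword, mask = ((1u << width) - 1) << shift;

    memcpy(&dword, payload + off, sizeof(dword));
    dword = __builtin_bswap32(dword);
    dword = (dword & ~mask) | ((val << shift) & mask);
    dword = __builtin_bswap32(dword);
    memcpy(payload + off, &dword, sizeof(dword));
}

int main(void)
{
    uint8_t pmtdb[0x40] = {0};

    /* pmtdb.module: dword 0x00, bits [23:16] */
    item32_set(pmtdb, 0x00, 16, 8, 5);
    printf("module=%u\n", item32_get(pmtdb, 0x00, 16, 8));
    return 0;
}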
+ * Access: RO */ -MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4); +MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4); -static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module) +static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port) { - MLXSW_REG_ZERO(pmtm, payload); - mlxsw_reg_pmtm_module_set(payload, module); + MLXSW_REG_ZERO(pllp, payload); + mlxsw_reg_pllp_local_port_set(payload, local_port); } -static inline void -mlxsw_reg_pmtm_unpack(char *payload, - enum mlxsw_reg_pmtm_module_type *module_type) +static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port, + u8 *split_num, u8 *slot_index) { - *module_type = mlxsw_reg_pmtm_module_type_get(payload); + *label_port = mlxsw_reg_pllp_label_port_get(payload); + *split_num = mlxsw_reg_pllp_split_num_get(payload); + *slot_index = mlxsw_reg_pllp_slot_index_get(payload); } /* HTGT - Host Trap Group Table @@ -6664,6 +6734,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } +static inline void +mlxsw_reg_ritr_loopback_ipip6_pack(char *payload, + enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, + enum mlxsw_reg_ritr_loopback_ipip_options options, + u16 uvr_id, u16 underlay_rif, + const struct in6_addr *usip, u32 gre_key) +{ + enum mlxsw_reg_ritr_loopback_protocol protocol = + MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6; + + mlxsw_reg_ritr_loopback_protocol_set(payload, protocol); + mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options, + uvr_id, underlay_rif, gre_key); + mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload, + (const char *)usip); +} + /* RTAR - Router TCAM Allocation Register * -------------------------------------- * This register is used for allocation of regions in the TCAM table. 
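Usage note: since the reworked mlxsw_reg_pmaos_pack() above now only zeroes the payload and sets the module field, each caller opts in to exactly the fields it wants latched. Below is a minimal sketch of the resulting calling convention; example_module_enable is a hypothetical helper, not part of this patch, and assumes only a valid mlxsw_core handle:

static int example_module_enable(struct mlxsw_core *mlxsw_core, u8 module)
{
	char pmaos_pl[MLXSW_REG_PMAOS_LEN];

	mlxsw_reg_pmaos_pack(pmaos_pl, module);
	/* ase must be set for the firmware to apply admin_status on this
	 * write; the slimmed-down pack helper no longer sets it.
	 */
	mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
	mlxsw_reg_pmaos_admin_status_set(pmaos_pl,
					 MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
}

The new rst bit is presumably driven the same way by mlxsw_env_reset_module() (in core_env.c, not shown here), which the ethtool .reset hook added further down in this patch calls into.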
@@ -6932,6 +7019,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip) mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip); } +static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr) +{ + mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6); + mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr); +} + static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index, bool counter_enable) { @@ -8117,19 +8210,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload, } static inline void -mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, - enum mlxsw_reg_rtdp_ipip_sip_check sip_check, - unsigned int type_check, bool gre_key_check, - u32 ipv4_usip, u32 expected_gre_key) +mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 expected_gre_key) { mlxsw_reg_rtdp_ipip_irif_set(payload, irif); mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check); mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check); mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check); - mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +static inline void +mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv4_usip, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip); +} + +static inline void +mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif, + enum mlxsw_reg_rtdp_ipip_sip_check sip_check, + unsigned int type_check, bool gre_key_check, + u32 ipv6_usip_ptr, u32 expected_gre_key) +{ + mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check, + gre_key_check, expected_gre_key); + mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr); +} + +/* RIPS - Router IP version Six Register + * ------------------------------------- + * The RIPS register is used to store IPv6 addresses for use by the NVE and + * IPinIP + */ +#define MLXSW_REG_RIPS_ID 0x8021 +#define MLXSW_REG_RIPS_LEN 0x14 + +MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN); + +/* reg_rips_index + * Index to IPv6 address. + * For Spectrum, the index is to the KVD linear. 
+ * Access: Index + */ +MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24); + +/* reg_rips_ipv6 + * IPv6 address + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16); + +static inline void mlxsw_reg_rips_pack(char *payload, u32 index, + const struct in6_addr *ipv6) +{ + MLXSW_REG_ZERO(rips, payload); + mlxsw_reg_rips_index_set(payload, index); + mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6); +} + /* RATRAD - Router Adjacency Table Activity Dump Register * ------------------------------------------------------ * The RATRAD register is used to dump and optionally clear activity bits of @@ -12200,9 +12345,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pspa), MLXSW_REG(pmaos), MLXSW_REG(pplr), + MLXSW_REG(pmtdb), MLXSW_REG(pmpe), MLXSW_REG(pddr), - MLXSW_REG(pmtm), + MLXSW_REG(pllp), MLXSW_REG(htgt), MLXSW_REG(hpkt), MLXSW_REG(rgcr), @@ -12210,6 +12356,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), + MLXSW_REG(rips), MLXSW_REG(ratrad), MLXSW_REG(rdpm), MLXSW_REG(ricnt), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index a56c9e19a390..a1512be77867 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -25,9 +25,6 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, - MLXSW_RES_ID_LOCAL_PORTS_IN_1X, - MLXSW_RES_ID_LOCAL_PORTS_IN_2X, - MLXSW_RES_ID_LOCAL_PORTS_IN_4X, MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER, MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_MAX_HEADROOM_SIZE, @@ -84,9 +81,6 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, - [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610, - [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611, - [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612, [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 250c5a24264d..0e81ae723bc8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -47,7 +47,7 @@ #define MLXSW_SP1_FWREV_MAJOR 13 #define MLXSW_SP1_FWREV_MINOR 2008 -#define MLXSW_SP1_FWREV_SUBMINOR 2406 +#define MLXSW_SP1_FWREV_SUBMINOR 3326 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { #define MLXSW_SP2_FWREV_MAJOR 29 #define MLXSW_SP2_FWREV_MINOR 2008 -#define MLXSW_SP2_FWREV_SUBMINOR 2406 +#define MLXSW_SP2_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, @@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { #define MLXSW_SP3_FWREV_MAJOR 30 #define MLXSW_SP3_FWREV_MINOR 2008 -#define MLXSW_SP3_FWREV_SUBMINOR 2406 +#define MLXSW_SP3_FWREV_SUBMINOR 3326 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { .major = MLXSW_SP3_FWREV_MAJOR, @@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); } -static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) +static int mlxsw_sp_port_swid_set(struct 
mlxsw_sp *mlxsw_sp, + u8 local_port, u8 swid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pspa_pl[MLXSW_REG_PSPA_LEN]; - mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); + mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); } @@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port, port_mapping->module = module; port_mapping->width = width; + port_mapping->module_width = width; port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); return 0; } -static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port) +static int +mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_port_mapping *port_mapping) { - struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - int i; + int i, err; + + mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module); - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width); for (i = 0; i < port_mapping->width; i++) { mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module); mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */ } - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + goto err_pmlp_write; + return 0; + +err_pmlp_write: + mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module); + return err; } -static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port) +static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 module) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); mlxsw_reg_pmlp_width_set(pmlp_pl, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + mlxsw_env_module_port_unmap(mlxsw_sp->core, module); } static int mlxsw_sp_port_open(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + err = mlxsw_env_module_port_up(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); if (err) return err; + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) + goto err_port_admin_status_set; netif_start_queue(dev); return 0; + +err_port_admin_status_set: + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return err; } static int mlxsw_sp_port_stop(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; netif_stop_queue(dev); - return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_env_module_port_down(mlxsw_sp->core, + mlxsw_sp_port->mapping.module); + return 0; } static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, @@ -1442,29 +1467,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl); } +static int mlxsw_sp_port_label_info_get(struct mlxsw_sp 
*mlxsw_sp, + u8 local_port, u8 *port_number, + u8 *split_port_subnumber, + u8 *slot_index) +{ + char pllp_pl[MLXSW_REG_PLLP_LEN]; + int err; + + mlxsw_reg_pllp_pack(pllp_pl, local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl); + if (err) + return err; + mlxsw_reg_pllp_unpack(pllp_pl, port_number, + split_port_subnumber, slot_index); + return 0; +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, - u8 split_base_local_port, + bool split, struct mlxsw_sp_port_mapping *port_mapping) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - bool split = !!split_base_local_port; struct mlxsw_sp_port *mlxsw_sp_port; u32 lanes = port_mapping->width; + u8 split_port_subnumber; struct net_device *dev; + u8 port_number; + u8 slot_index; bool splittable; int err; + err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", + local_port); + return err; + } + + err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + local_port); + goto err_port_swid_set; + } + + err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number, + &split_port_subnumber, &slot_index); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n", + local_port); + goto err_port_label_info_get; + } + splittable = lanes > 1 && !split; err = mlxsw_core_port_init(mlxsw_sp->core, local_port, - port_mapping->module + 1, split, - port_mapping->lane / lanes, - splittable, lanes, - mlxsw_sp->base_mac, + port_number, split, split_port_subnumber, + splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", local_port); - return err; + goto err_core_port_init; } dev = alloc_etherdev(sizeof(struct mlxsw_sp_port)); @@ -1480,7 +1544,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_sp_port->local_port = local_port; mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; mlxsw_sp_port->split = split; - mlxsw_sp_port->split_base_local_port = split_base_local_port; mlxsw_sp_port->mapping = *port_mapping; mlxsw_sp_port->link.autoneg = 1; INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list); @@ -1498,20 +1561,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev->netdev_ops = &mlxsw_sp_port_netdev_ops; dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; - err = mlxsw_sp_port_module_map(mlxsw_sp_port); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n", - mlxsw_sp_port->local_port); - goto err_port_module_map; - } - - err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", - mlxsw_sp_port->local_port); - goto err_port_swid_set; - } - err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", @@ -1712,21 +1761,24 @@ err_max_speed_get: err_port_speed_by_width_set: err_port_system_port_mapping_set: err_dev_addr_init: - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); -err_port_swid_set: - mlxsw_sp_port_module_unmap(mlxsw_sp_port); -err_port_module_map: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: free_netdev(dev); err_alloc_etherdev: mlxsw_core_port_fini(mlxsw_sp->core, local_port); +err_core_port_init: +err_port_label_info_get: + 
mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module); return err; } static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + u8 module = mlxsw_sp_port->mapping.module; cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw); @@ -1742,12 +1794,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false); mlxsw_sp_port_buffers_fini(mlxsw_sp_port); - mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); - mlxsw_sp_port_module_unmap(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); free_netdev(mlxsw_sp_port->dev); mlxsw_core_port_fini(mlxsw_sp->core, local_port); + mlxsw_sp_port_swid_set(mlxsw_sp, local_port, + MLXSW_PORT_SWID_DISABLED_PORT); + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module); } static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp) @@ -1789,8 +1842,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp_port); } +static bool mlxsw_sp_local_port_valid(u8 local_port) +{ + return local_port != MLXSW_PORT_CPU_PORT; +} + static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port) { + if (!mlxsw_sp_local_port_valid(local_port)) + return false; return mlxsw_sp->ports[local_port] != NULL; } @@ -1827,7 +1887,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) port_mapping = mlxsw_sp->port_mapping[i]; if (!port_mapping) continue; - err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping); + err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping); if (err) goto err_port_create; } @@ -1894,17 +1954,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->port_mapping); } -static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width) -{ - u8 offset = (local_port - 1) % max_width; - - return local_port - offset; -} - static int -mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, +mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port_mapping *port_mapping, - unsigned int count, u8 offset) + unsigned int count, const char *pmtdb_pl) { struct mlxsw_sp_port_mapping split_port_mapping; int err, i; @@ -1912,8 +1965,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, split_port_mapping = *port_mapping; split_port_mapping.width /= count; for (i = 0; i < count; i++) { - err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset, - base_port, &split_port_mapping); + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (!mlxsw_sp_local_port_valid(s_local_port)) + continue; + + err = mlxsw_sp_port_create(mlxsw_sp, s_local_port, + true, &split_port_mapping); if (err) goto err_port_create; split_port_mapping.lane += split_port_mapping.width; @@ -1922,49 +1980,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, return 0; err_port_create: - for (i--; i >= 0; i--) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + for (i--; i >= 0; i--) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + if (mlxsw_sp_port_created(mlxsw_sp, 
s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } return err; } static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, - u8 base_port, - unsigned int count, u8 offset) + unsigned int count, + const char *pmtdb_pl) { struct mlxsw_sp_port_mapping *port_mapping; int i; /* Go over original unsplit ports in the gap and recreate them. */ - for (i = 0; i < count * offset; i++) { - port_mapping = mlxsw_sp->port_mapping[base_port + i]; - if (!port_mapping) + for (i = 0; i < count; i++) { + u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); + + port_mapping = mlxsw_sp->port_mapping[local_port]; + if (!port_mapping || !mlxsw_sp_local_port_valid(local_port)) continue; - mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping); + mlxsw_sp_port_create(mlxsw_sp, local_port, + false, port_mapping); } } -static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core, - unsigned int count, - unsigned int max_width) -{ - enum mlxsw_res_id local_ports_in_x_res_id; - int split_width = max_width / count; - - if (split_width == 1) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X; - else if (split_width == 2) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X; - else if (split_width == 4) - local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X; - else - return -EINVAL; - - if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id)) - return -EINVAL; - return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); -} - static struct mlxsw_sp_port * mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) { @@ -1980,9 +2023,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port_mapping port_mapping; struct mlxsw_sp_port *mlxsw_sp_port; - int max_width; - u8 base_port; - int offset; + enum mlxsw_reg_pmtdb_status status; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; int i; int err; @@ -1994,57 +2036,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; + if (mlxsw_sp_port->split) { + NL_SET_ERR_MSG_MOD(extack, "Port is already split"); + return -EINVAL; } - /* Split port with non-max cannot be split. */ - if (mlxsw_sp_port->mapping.width != max_width) { - netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); - NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (offset < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); + status = mlxsw_reg_pmtdb_status_get(pmtdb_pl); + if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration"); return -EINVAL; } - /* Only in case max split is being done, the local port and - * base port may differ. - */ - base_port = count == max_width ? 
- mlxsw_sp_cluster_base_port_get(local_port, max_width) : - local_port; + port_mapping = mlxsw_sp_port->mapping; - for (i = 0; i < count * offset; i++) { - /* Expect base port to exist and also the one in the middle in - * case of maximal split count. - */ - if (i == 0 || (count == max_width && i == count / 2)) - continue; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) { - netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); - NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration"); - return -EINVAL; - } + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); } - port_mapping = mlxsw_sp_port->mapping; - - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); - - err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping, - count, offset); + err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping, + count, pmtdb_pl); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); goto err_port_split_create; @@ -2053,7 +2075,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return 0; err_port_split_create: - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return err; } @@ -2062,11 +2084,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; + char pmtdb_pl[MLXSW_REG_PMTDB_LEN]; unsigned int count; - int max_width; - u8 base_port; - int offset; int i; + int err; mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); if (!mlxsw_sp_port) { @@ -2077,35 +2098,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port, } if (!mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port was not split\n"); NL_SET_ERR_MSG_MOD(extack, "Port was not split"); return -EINVAL; } - max_width = mlxsw_core_module_max_width(mlxsw_core, - mlxsw_sp_port->mapping.module); - if (max_width < 0) { - netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module"); - return max_width; - } + count = mlxsw_sp_port->mapping.module_width / + mlxsw_sp_port->mapping.width; - count = max_width / mlxsw_sp_port->mapping.width; - - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); - if (WARN_ON(offset < 0)) { - netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); - NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset"); - return -EINVAL; + mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module, + mlxsw_sp_port->mapping.module_width / count, + count); + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query split info"); + return err; } - base_port = mlxsw_sp_port->split_base_local_port; + for (i = 0; i < count; i++) { + u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i); - for (i = 0; i < count; i++) - if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset)) - mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset); + if (mlxsw_sp_port_created(mlxsw_sp, s_local_port)) + mlxsw_sp_port_remove(mlxsw_sp, s_local_port); + } - mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, 
count, offset); + mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3a43cba6d23c..0ebbd9b04b89 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -144,7 +144,8 @@ struct mlxsw_sp_mall_entry; struct mlxsw_sp_port_mapping { u8 module; - u8 width; + u8 width; /* Number of lanes used by the port */ + u8 module_width; /* Number of lanes in the module (static) */ u8 lane; }; @@ -345,7 +346,6 @@ struct mlxsw_sp_port { u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; } ptp; - u8 split_base_local_port; int max_mtu; u32 max_speed; struct mlxsw_sp_hdroom *hdroom; @@ -747,6 +747,7 @@ enum mlxsw_sp_kvdl_entry_type { MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET, MLXSW_SP_KVDL_ENTRY_TYPE_PBS, MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, }; @@ -758,6 +759,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type) case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET: case MLXSW_SP_KVDL_ENTRY_TYPE_PBS: case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR: + case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS: case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT: default: return 1; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c index 3a73d654017f..10ae1115de6c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c @@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = { MAX_KVD_ACTION_SETS), MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE), + MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE), MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE), }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 267590a0eee7..06f1645561c6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1197,6 +1197,15 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev, *ranges = mlxsw_rmon_ranges; } +static int mlxsw_sp_reset(struct net_device *dev, u32 *flags) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags); +} + const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .cap_link_lanes_supported = true, .get_drvinfo = mlxsw_sp_port_get_drvinfo, @@ -1218,6 +1227,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, .get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats, .get_rmon_stats = mlxsw_sp_get_rmon_stats, + .reset = mlxsw_sp_reset, }; struct mlxsw_sp1_port_link_mode { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 5facabd86882..ad3926de88f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) 
{ - return !!(parms.i_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) { - return !!(parms.o_flags & TUNNEL_KEY); + return !!(parms->i_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms) +static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) +{ + return !!(parms->o_flags & TUNNEL_KEY); +} + +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? - be32_to_cpu(parms.i_key) : 0; + be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms) +static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_ikey(parms) ? + be32_to_cpu(parms->i_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? - be32_to_cpu(parms.o_key) : 0; + be32_to_cpu(parms->o_key) : 0; +} + +static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) +{ + return mlxsw_sp_ipip_parms6_has_okey(parms) ? + be32_to_cpu(parms->o_key) : 0; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr }; + return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms) +mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms) { - return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr }; + return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr }; } union mlxsw_sp_l3addr @@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_saddr(parms4); + return mlxsw_sp_ipip_parms4_saddr(&parms4); case MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_saddr(parms6); + return mlxsw_sp_ipip_parms6_saddr(&parms6); } WARN_ON(1); @@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4).addr4; + return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } static union mlxsw_sp_l3addr @@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - return mlxsw_sp_ipip_parms4_daddr(parms4); + return mlxsw_sp_ipip_parms4_daddr(&parms4); case 
MLXSW_SP_L3_PROTO_IPV6: parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev); - return mlxsw_sp_ipip_parms6_daddr(parms6); + return mlxsw_sp_ipip_parms6_daddr(&parms6); } WARN_ON(1); @@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) return !memcmp(&addr, &naddr, sizeof(naddr)); } +static struct mlxsw_sp_ipip_parms +mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) +{ + struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV4, + .saddr = mlxsw_sp_ipip_parms4_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms4_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms4_ikey(&parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), + }; +} + static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, u32 ikey; parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms); - ikey = mlxsw_sp_ipip_parms4_ikey(parms); + has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms4_ikey(&parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); @@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; - lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ? + lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; return (struct mlxsw_sp_rif_ipip_lb_config){ .lb_ipipt = lb_ipipt, - .okey = mlxsw_sp_ipip_parms4_okey(parms), + .okey = mlxsw_sp_ipip_parms4_okey(&parms), .ul_protocol = MLXSW_SP_L3_PROTO_IPV4, .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4, ol_dev), @@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_ipip_entry *ipip_entry, - struct netlink_ext_ack *extack) +mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + const struct mlxsw_sp_ipip_parms *new_parms, + struct netlink_ext_ack *extack) { - union mlxsw_sp_l3addr old_saddr, new_saddr; - union mlxsw_sp_l3addr old_daddr, new_daddr; - struct ip_tunnel_parm new_parms; + const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms; bool update_tunnel = false; bool update_decap = false; bool update_nhs = false; int err = 0; - new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev); - - new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms); - old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4); - new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms); - old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4); - - if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { + if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) { u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); /* Since the local address has changed, if there is another * tunnel with a matching saddr, both need to be demoted. 
*/ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, - MLXSW_SP_L3_PROTO_IPV4, - new_saddr, ul_tb_id, + new_parms->proto, + new_parms->saddr, + ul_tb_id, ipip_entry)) { mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); return 0; } update_tunnel = true; - } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_okey(new_parms)) || - ipip_entry->parms4.link != new_parms.link) { + } else if (old_parms->okey != new_parms->okey || + old_parms->link != new_parms->link) { update_tunnel = true; - } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { + } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) { update_nhs = true; - } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) != - mlxsw_sp_ipip_parms4_ikey(new_parms)) { + } else if (old_parms->ikey != new_parms->ikey) { update_decap = true; } @@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, false, false, false, extack); + if (err) + return err; - ipip_entry->parms4 = new_parms; - return err; + ipip_entry->parms = *new_parms; + return 0; +} + +static int +mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + return 0; +} + +static void +mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ } static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .dev_type = ARPHRD_IPGRE, .ul_proto = MLXSW_SP_L3_PROTO_IPV4, + .inc_parsing_depth = false, + .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4, .nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4, .decap_config = mlxsw_sp_ipip_decap_config_gre4, .can_offload = mlxsw_sp_ipip_can_offload_gre4, .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4, + .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4, + .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4, }; -const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = { +static struct mlxsw_sp_ipip_parms +mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_parms parms = {0}; + + WARN_ON_ONCE(1); + return parms; +} + +static int +mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + return false; +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct mlxsw_sp_rif_ipip_lb_config config = {0}; + + WARN_ON_ONCE(1); + return config; +} + +static int +mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) 
+{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static int +mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); + return -EINVAL; +} + +static void +mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + WARN_ON_ONCE(1); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp1_ipip_decap_config_gre6, + .can_offload = mlxsw_sp1_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = { [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops, +}; + +static struct mlxsw_sp_ipip_parms +mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + + return (struct mlxsw_sp_ipip_parms) { + .proto = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_parms6_saddr(&parms), + .daddr = mlxsw_sp_ipip_parms6_daddr(&parms), + .link = parms.link, + .ikey = mlxsw_sp_ipip_parms6_ikey(&parms), + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + }; +} + +static int +mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool force, char *ratr_pl) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + enum mlxsw_reg_ratr_op op; + + op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY : + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY; + mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP, + adj_index, rif_index); + mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl, + ipip_entry->dip_kvdl_index); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + u32 tunnel_index) +{ + u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + struct __ip6_tnl_parm parms; + unsigned int type_check; + bool has_ikey; + u32 ikey; + + parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms); + ikey = mlxsw_sp_ipip_parms6_ikey(&parms); + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); + mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); + + type_check = has_ikey ? + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : + MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE; + + /* Linux demuxes tunnels based on packet SIP (which must match tunnel + * remote IP). Thus configure decap so that it filters out packets that + * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is + * generated for packets that fail this criterion. Linux then handles + * such packets in slow path and generates ICMP destination unreachable. 
+ */ + mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index, + MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6, + type_check, has_ikey, + ipip_entry->dip_kvdl_index, ikey); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); +} + +static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); + bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; + bool inherit_ttl = tparm.hop_limit == 0; + __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + + return (tparm.i_flags & ~okflags) == 0 && + (tparm.o_flags & ~okflags) == 0 && + inherit_ttl && inherit_tos && + mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); +} + +static struct mlxsw_sp_rif_ipip_lb_config +mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev); + enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; + + lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ? + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP : + MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP; + return (struct mlxsw_sp_rif_ipip_lb_config){ + .lb_ipipt = lb_ipipt, + .okey = mlxsw_sp_ipip_parms6_okey(&parms), + .ul_protocol = MLXSW_SP_L3_PROTO_IPV6, + .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6, + ol_dev), + }; +} + +static int +mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_parms new_parms; + + new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev); + return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry, + &new_parms, extack); +} + +static int +mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + char rips_pl[MLXSW_REG_RIPS_LEN]; + struct __ip6_tnl_parm parms6; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, + MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + &ipip_entry->dip_kvdl_index); + if (err) + return err; + + parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev); + mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index, + &parms6.raddr); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl); + if (err) + goto err_rips_write; + + return 0; + +err_rips_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); + return err; +} + +static void +mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1, + ipip_entry->dip_kvdl_index); +} + +static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = { + .dev_type = ARPHRD_IP6GRE, + .ul_proto = MLXSW_SP_L3_PROTO_IPV6, + .inc_parsing_depth = true, + .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6, + .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6, + .decap_config = mlxsw_sp2_ipip_decap_config_gre6, + .can_offload = mlxsw_sp2_ipip_can_offload_gre6, + .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6, + .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6, + .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6, + .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6, +}; + +const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = { + [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops, + 
[MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops, }; static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp, @@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp) return 0; } + +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) +{ + struct net *net = dev_net(ol_dev); + struct ip_tunnel *tun4; + struct ip6_tnl *tun6; + + switch (ol_dev->type) { + case ARPHRD_IPGRE: + tun4 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun4->parms.link); + case ARPHRD_IP6GRE: + tun6 = netdev_priv(ol_dev); + return dev_get_by_index_rcu(net, tun6->parms.link); + default: + return NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index f0837b42d1d6..8cc259dcc8d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -7,6 +7,7 @@ #include "spectrum_router.h" #include <net/ip_fib.h> #include <linux/if_tunnel.h> +#include <net/ip6_tunnel.h> struct ip_tunnel_parm mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); @@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr); enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, + MLXSW_SP_IPIP_TYPE_GRE6, MLXSW_SP_IPIP_TYPE_MAX, }; +struct mlxsw_sp_ipip_parms { + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr saddr; + union mlxsw_sp_l3addr daddr; + int link; + u32 ikey; + u32 okey; +}; + struct mlxsw_sp_ipip_entry { enum mlxsw_sp_ipip_type ipipt; struct net_device *ol_dev; /* Overlay. */ struct mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; - union { - struct ip_tunnel_parm parms4; - }; + struct mlxsw_sp_ipip_parms parms; + u32 dip_kvdl_index; }; struct mlxsw_sp_ipip_ops { int dev_type; enum mlxsw_sp_l3proto ul_proto; /* Underlay. 
*/ + bool inc_parsing_depth; + + struct mlxsw_sp_ipip_parms + (*parms_init)(const struct net_device *ol_dev); int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry, @@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops { int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, struct netlink_ext_ack *extack); + int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry); + void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_ipip_entry *ipip_entry); }; -extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[]; +extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[]; #endif /* _MLXSW_IPIP_H_*/ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 19bb3ca0515e..1e141b5944cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -115,6 +115,7 @@ struct mlxsw_sp_rif_ops { struct mlxsw_sp_router_ops { int (*init)(struct mlxsw_sp *mlxsw_sp); + int (*ipips_init)(struct mlxsw_sp *mlxsw_sp); }; static struct mlxsw_sp_rif * @@ -1055,22 +1056,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->router->vrs); } -static struct net_device * -__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) -{ - struct ip_tunnel *tun = netdev_priv(ol_dev); - struct net *net = dev_net(ol_dev); - - return dev_get_by_index_rcu(net, tun->parms.link); -} - u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) { struct net_device *d; u32 tb_id; rcu_read_lock(); - d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); if (d) tb_id = l3mdev_fib_table(d) ? 
: RT_TABLE_MAIN; else @@ -1116,6 +1108,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_ipip_ops *ipip_ops; struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_ipip_entry *ret = NULL; + int err; ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL); @@ -1131,26 +1124,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; + ipip_entry->parms = ipip_ops->parms_init(ol_dev); - switch (ipip_ops->ul_proto) { - case MLXSW_SP_L3_PROTO_IPV4: - ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); - break; - case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON(1); - break; + err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry); + if (err) { + ret = ERR_PTR(err); + goto err_rem_ip_addr_set; } return ipip_entry; +err_rem_ip_addr_set: + mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); err_ol_ipip_lb_create: kfree(ipip_entry); return ret; } -static void -mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) +static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) { + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + + ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry); mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); kfree(ipip_entry); } @@ -1174,6 +1171,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_l3addr_eq(&tun_saddr, &saddr); } +static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops; + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt]; + + /* Not all tunnels require increasing the default parsing depth + * (96 bytes). + */ + if (ipip_ops->inc_parsing_depth) + return mlxsw_sp_parsing_depth_inc(mlxsw_sp); + + return 0; +} + +static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops = + mlxsw_sp->router->ipip_ops_arr[ipipt]; + + if (ipip_ops->inc_parsing_depth) + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +} + static int mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, @@ -1187,18 +1210,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp, + ipip_entry->ipipt); + if (err) + goto err_parsing_depth_inc; + ipip_entry->decap_fib_entry = fib_entry; fib_entry->decap.ipip_entry = ipip_entry; fib_entry->decap.tunnel_index = tunnel_index; + return 0; + +err_parsing_depth_inc: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + fib_entry->decap.tunnel_index); + return err; } static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry) { + enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt; + /* Unlink this node from the IPIP entry that it's the decap entry of.
*/ fib_entry->decap.ipip_entry->decap_fib_entry = NULL; fib_entry->decap.ipip_entry = NULL; + mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt); mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, fib_entry->decap.tunnel_index); } @@ -1309,6 +1346,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, saddr_len = 4; saddr_prefix_len = 32; break; + case MLXSW_SP_L3_PROTO_IPV6: + saddrp = &saddr.addr6; + saddr_len = 16; + saddr_prefix_len = 128; + break; default: WARN_ON(1); return NULL; @@ -1345,7 +1387,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { list_del(&ipip_entry->ipip_list_node); - mlxsw_sp_ipip_entry_dealloc(ipip_entry); + mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry); } static bool @@ -1450,7 +1492,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) @@ -1536,23 +1578,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id, u16 ul_rif_id, bool enable) { struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; + enum mlxsw_reg_ritr_loopback_ipip_options ipip_options; struct mlxsw_sp_rif *rif = &lb_rif->common; struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; char ritr_pl[MLXSW_REG_RITR_LEN]; + struct in6_addr *saddr6; u32 saddr4; + ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET; switch (lb_cf.ul_protocol) { case MLXSW_SP_L3_PROTO_IPV4: saddr4 = be32_to_cpu(lb_cf.saddr.addr4); mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, - MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr_id, ul_rif_id, saddr4, lb_cf.okey); + ipip_options, ul_vr_id, + ul_rif_id, saddr4, + lb_cf.okey); break; case MLXSW_SP_L3_PROTO_IPV6: - return -EAFNOSUPPORT; + saddr6 = &lb_cf.saddr.addr6; + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + rif->rif_index, rif->vr_id, rif->dev->mtu); + mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt, + ipip_options, ul_vr_id, + ul_rif_id, saddr6, + lb_cf.okey); + break; } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); @@ -1827,7 +1880,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp, struct net_device *ipip_ul_dev; rcu_read_lock(); - ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); rcu_read_unlock(); if (ipip_ul_dev == ul_dev) mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); @@ -4152,7 +4205,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) bool is_up; rcu_read_lock(); - ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); is_up = ul_dev ? 
(ul_dev->flags & IFF_UP) : true; rcu_read_unlock(); @@ -4376,6 +4429,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } } +static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ratr_trap_action trap_action; + char ratr_pl[MLXSW_REG_RATR_LEN]; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + &mlxsw_sp->router->adj_trap_index); + if (err) + return err; + + trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, + MLXSW_REG_RATR_TYPE_ETHERNET, + mlxsw_sp->router->adj_trap_index, + mlxsw_sp->router->lb_rif_index); + mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); + mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); + if (err) + goto err_ratr_write; + + return 0; + +err_ratr_write: + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); + return err; +} + +static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, + mlxsw_sp->router->adj_trap_index); +} + +static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups)) + return 0; + + err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp); + if (err) + return err; + + refcount_set(&mlxsw_sp->router->num_groups, 1); + + return 0; +} + +static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp) +{ + if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups)) + return; + + mlxsw_sp_adj_trap_entry_fini(mlxsw_sp); +} + static void mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nexthop_group *nh_grp, @@ -4790,6 +4903,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_obj_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) { NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device"); @@ -4808,6 +4924,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop_obj_init: for (i--; i >= 0; i--) { @@ -4832,6 +4950,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp, cancel_delayed_work(&router->nh_grp_activity_dw); } + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5223,6 +5342,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop4_init; } + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -5230,6 +5352,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop4_init: for (i--; i >= 0; i--) { @@ -5247,6 +5371,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -5725,41 +5850,6 @@ static int mlxsw_sp_fib_entry_commit(struct 
mlxsw_sp *mlxsw_sp, return err; } -static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp) -{ - enum mlxsw_reg_ratr_trap_action trap_action; - char ratr_pl[MLXSW_REG_RATR_LEN]; - int err; - - if (mlxsw_sp->router->adj_discard_index_valid) - return 0; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - &mlxsw_sp->router->adj_discard_index); - if (err) - return err; - - trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP; - mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true, - MLXSW_REG_RATR_TYPE_ETHERNET, - mlxsw_sp->router->adj_discard_index, - mlxsw_sp->router->lb_rif_index); - mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action); - mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); - if (err) - goto err_ratr_write; - - mlxsw_sp->router->adj_discard_index_valid = true; - - return 0; - -err_ratr_write: - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - return err; -} - static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry_op_ctx *op_ctx, struct mlxsw_sp_fib_entry *fib_entry, @@ -5772,7 +5862,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, u16 trap_id = 0; u32 adjacency_index = 0; u16 ecmp_size = 0; - int err; /* In case the nexthop group adjacency index is valid, use it * with provided ECMP size. Otherwise, setup trap and pass @@ -5783,11 +5872,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, adjacency_index = nhgi->adj_index; ecmp_size = nhgi->ecmp_size; } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) { - err = mlxsw_sp_adj_discard_write(mlxsw_sp); - if (err) - return err; trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; - adjacency_index = mlxsw_sp->router->adj_discard_index; + adjacency_index = mlxsw_sp->router->adj_trap_index; ecmp_size = 1; } else { trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; @@ -6036,8 +6122,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } static void -mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: @@ -6048,6 +6134,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, } } +static void +mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib4_entry *fib4_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common); +} + static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, @@ -6108,7 +6201,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; fib_info_put(fib4_entry->fi); - mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry); mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); @@ -6641,6 +6734,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); } nh_grp->nhgi = nhgi; + err = mlxsw_sp_nexthop_group_inc(mlxsw_sp); + if (err) + goto err_group_inc; err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); if (err) goto err_group_refresh; @@ -6648,6 
+6744,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp, return 0; err_group_refresh: + mlxsw_sp_nexthop_group_dec(mlxsw_sp); +err_group_inc: i = nhgi->count; err_nexthop6_init: for (i--; i >= 0; i--) { @@ -6665,6 +6763,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi; int i; + mlxsw_sp_nexthop_group_dec(mlxsw_sp); for (i = nhgi->count - 1; i >= 0; i--) { struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i]; @@ -6888,11 +6987,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry); } -static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - const struct fib6_info *rt) +static int +mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) { - if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) + struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi; + union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr }; + int ifindex = nhgi->nexthops[0].ifindex; + struct mlxsw_sp_ipip_entry *ipip_entry; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex, + MLXSW_SP_L3_PROTO_IPV6, + dip); + + if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP; + return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry, + ipip_entry); + } + + return 0; +} + +static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + const struct fib6_info *rt) +{ + if (rt->fib6_flags & RTF_LOCAL) + return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry, + rt); + if (rt->fib6_flags & RTF_ANYCAST) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; else if (rt->fib6_type == RTN_BLACKHOLE) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; @@ -6902,6 +7028,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; else fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + + return 0; } static void @@ -6959,12 +7087,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_group_vr_link; - mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]); + if (err) + goto err_fib6_entry_type_set; fib_entry->fib_node = fib_node; return fib6_entry; +err_fib6_entry_type_set: + mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib); err_nexthop_group_vr_link: mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry); err_nexthop6_group_get: @@ -6983,11 +7115,19 @@ err_fib_entry_priv_create: return ERR_PTR(err); } +static void +mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common); +} + static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib6_entry *fib6_entry) { struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry); mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group, fib_node->fib); mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); @@ -7340,16 +7480,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } - - 
/* After flushing all the routes, it is not possible anyone is still - * using the adjacency index that is discarding packets, so free it in - * case it was allocated. - */ - if (!mlxsw_sp->router->adj_discard_index_valid) - return; - mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1, - mlxsw_sp->router->adj_discard_index); - mlxsw_sp->router->adj_discard_index_valid = false; } struct mlxsw_sp_fib6_event { @@ -9447,7 +9577,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) { int err; - mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp); @@ -9460,6 +9589,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); } +static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + +static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr; + return mlxsw_sp_ipips_init(mlxsw_sp); +} + static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) { WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list)); @@ -9874,6 +10015,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = { .init = mlxsw_sp1_router_init, + .ipips_init = mlxsw_sp1_ipips_init, }; static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) @@ -9889,6 +10031,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = { .init = mlxsw_sp2_router_init, + .ipips_init = mlxsw_sp2_ipips_init, }; int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, @@ -9934,7 +10077,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rifs_init; - err = mlxsw_sp_ipips_init(mlxsw_sp); + err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp); if (err) goto err_ipips_init; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 25d3eae63501..1d0d28f8ff05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -65,8 +65,6 @@ struct mlxsw_sp_router { struct notifier_block inet6addr_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; - u32 adj_discard_index; - bool adj_discard_index_valid; struct mlxsw_sp_router_nve_decap nve_decap_config; struct mutex lock; /* Protects shared router resources */ struct work_struct fib_event_work; @@ -82,6 +80,8 @@ struct mlxsw_sp_router { struct delayed_work nh_grp_activity_dw; struct list_head nh_res_grp_list; bool inc_parsing_depth; + refcount_t num_groups; + u32 adj_trap_index; }; struct mlxsw_sp_fib_entry_priv { @@ -226,6 +226,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp); +struct net_device * +mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev); extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops; diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index ee921a99e439..c548e6372352 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -1122,4 +1122,3 @@ 
module_spi_driver(encx24j600_spi_net_driver); MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index cbece6e9bff2..c6eb0b70dd3f 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5) } iomem[idx] = devm_ioremap(sparx5->dev, iores[idx]->start, - iores[idx]->end - iores[idx]->start - + 1); + resource_size(iores[idx])); if (!iomem[idx]) { dev_err(sparx5->dev, "Unable to get switch registers: %s\n", iores[idx]->name); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 291ae6817c26..5d01993f6be0 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -1134,10 +1134,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_put_ports; - err = devlink_register(devlink); - if (err) - goto out_ocelot_deinit; - err = mscc_ocelot_init_ports(pdev, ports); if (err) goto out_ocelot_devlink_unregister; @@ -1160,6 +1156,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); of_node_put(ports); + devlink_register(devlink); dev_info(&pdev->dev, "Ocelot switch probed\n"); @@ -1169,8 +1166,6 @@ out_ocelot_release_ports: mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); out_ocelot_devlink_unregister: - devlink_unregister(devlink); -out_ocelot_deinit: ocelot_deinit(ocelot); out_put_ports: of_node_put(ports); @@ -1183,11 +1178,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev) { struct ocelot *ocelot = platform_get_drvdata(pdev); + devlink_unregister(ocelot->devlink); ocelot_deinit_timestamp(ocelot); ocelot_devlink_sb_unregister(ocelot); mscc_ocelot_release_ports(ocelot); mscc_ocelot_teardown_devlink_ports(ocelot); - devlink_unregister(ocelot->devlink); ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_switchdev_notifier(&ocelot_switchdev_nb); diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c index 36491835ac65..db297ee4d7ad 100644 --- a/drivers/net/ethernet/netronome/nfp/devlink_param.c +++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c @@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf) if (err <= 0) return err; - err = devlink_params_register(devlink, nfp_devlink_params, - ARRAY_SIZE(nfp_devlink_params)); - if (err) - return err; - - devlink_params_publish(devlink); - return 0; + return devlink_params_register(devlink, nfp_devlink_params, + ARRAY_SIZE(nfp_devlink_params)); } void nfp_devlink_params_unregister(struct nfp_pf *pf) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index d10a93801344..5fbb7c613ff1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; - err = devlink_register(devlink); - if (err) - goto err_app_clean; - err = nfp_shared_buf_register(pf); if (err) goto err_devlink_unreg; @@ -734,6 +730,7 @@ int 
nfp_net_pci_probe(struct nfp_pf *pf) goto err_stop_app; mutex_unlock(&pf->lock); + devlink_register(devlink); return 0; @@ -751,8 +748,6 @@ err_shared_buf_unreg: nfp_shared_buf_unregister(pf); err_devlink_unreg: cancel_work_sync(&pf->port_refresh_work); - devlink_unregister(devlink); -err_app_clean: nfp_net_pf_app_clean(pf); err_unmap: nfp_net_pci_unmap_mem(pf); @@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf) { struct nfp_net *nn, *next; + devlink_unregister(priv_to_devlink(pf)); mutex_lock(&pf->lock); list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) { if (!nfp_net_is_data_vnic(nn)) @@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf) nfp_devlink_params_unregister(pf); nfp_shared_buf_unregister(pf); - devlink_unregister(priv_to_devlink(pf)); nfp_net_pf_free_irqs(pf); nfp_net_pf_app_clean(pf); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 3b8e675087de..369f6ae700c7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) { struct nfp_reprs *reprs; - reprs = kzalloc(sizeof(*reprs) + - num_reprs * sizeof(struct net_device *), GFP_KERNEL); + reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL); if (!reprs) return NULL; reprs->num_reprs = num_reprs; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index c7d0e195d176..2267da95640b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -82,12 +82,6 @@ int ionic_devlink_register(struct ionic *ionic) struct devlink_port_attrs attrs = {}; int err; - err = devlink_register(dl); - if (err) { - dev_warn(ionic->dev, "devlink_register failed: %d\n", err); - return err; - } - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); @@ -98,6 +92,7 @@ int ionic_devlink_register(struct ionic *ionic) } devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + devlink_register(dl); return 0; } @@ -105,6 +100,6 @@ void ionic_devlink_unregister(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); - devlink_port_unregister(&ionic->dl_port); devlink_unregister(dl); + devlink_port_unregister(&ionic->dl_port); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index 78070682f2df..6bb4e165b592 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl); - if (rc) - goto err_free; - rc = devlink_params_register(dl, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); if (rc) @@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) QED_DEVLINK_PARAM_ID_IWARP_CMT, value); - devlink_params_publish(dl); cdev->iwarp_cmt = false; qed_fw_reporters_create(dl); - + devlink_register(dl); return dl; err_unregister: - devlink_unregister(dl); - -err_free: devlink_free(dl); return ERR_PTR(rc); @@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink) if (!devlink) return; + devlink_unregister(devlink); qed_fw_reporters_destroy(devlink); 
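/* A minimal sketch of the ordering pattern these hunks apply across
 * ocelot, nfp, ionic and qed: devlink_register() becomes the final step
 * of probe (the hunks call it without checking a return value), and
 * devlink_unregister() becomes the first step of remove, so user space
 * never observes a half-initialised devlink instance. The foo_* names
 * below are hypothetical, not part of this patch.
 */
static int foo_probe(struct foo *foo)
{
	struct devlink *dl = priv_to_devlink(foo);
	int err;

	err = foo_ports_register(foo);	/* hypothetical setup step */
	if (err)
		return err;

	devlink_register(dl);	/* expose to user space only when ready */
	return 0;
}

static void foo_remove(struct foo *foo)
{
	struct devlink *dl = priv_to_devlink(foo);

	devlink_unregister(dl);	/* hide from user space first */
	foo_ports_unregister(foo);
}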
devlink_params_unregister(devlink, qed_devlink_params, ARRAY_SIZE(qed_devlink_params)); - devlink_unregister(devlink); devlink_free(devlink); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index dfaf10edfabf..ba8c7a31cce1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev, return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); } -static int qed_configure_filter(struct qed_dev *cdev, - struct qed_filter_params *params) -{ - enum qed_filter_rx_mode_type accept_flags; - - switch (params->type) { - case QED_FILTER_TYPE_UCAST: - return qed_configure_filter_ucast(cdev, ¶ms->filter.ucast); - case QED_FILTER_TYPE_MCAST: - return qed_configure_filter_mcast(cdev, ¶ms->filter.mcast); - case QED_FILTER_TYPE_RX_MODE: - accept_flags = params->filter.accept_flags; - return qed_configure_filter_rx_mode(cdev, accept_flags); - default: - DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type); - return -EINVAL; - } -} - static int qed_configure_arfs_searcher(struct qed_dev *cdev, enum qed_filter_config_mode mode) { @@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .q_rx_stop = &qed_stop_rxq, .q_tx_start = &qed_start_txq, .q_tx_stop = &qed_stop_txq, - .filter_config = &qed_configure_filter, + .filter_config_rx_mode = &qed_configure_filter_rx_mode, + .filter_config_ucast = &qed_configure_filter_ucast, + .filter_config_mcast = &qed_configure_filter_mcast, .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a2e4dfb5cb44..f99b085b56a5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -619,30 +619,28 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char mac[ETH_ALEN]) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.mac_valid = 1; - ether_addr_copy(filter_cmd.filter.ucast.mac, mac); + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.mac_valid = 1; + ether_addr_copy(ucast.mac, mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_set_ucast_rx_vlan(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, u16 vid) { - struct qed_filter_params filter_cmd; + struct qed_filter_ucast_params ucast; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_UCAST; - filter_cmd.filter.ucast.type = opcode; - filter_cmd.filter.ucast.vlan_valid = 1; - filter_cmd.filter.ucast.vlan = vid; + memset(&ucast, 0, sizeof(ucast)); + ucast.type = opcode; + ucast.vlan_valid = 1; + ucast.vlan = vid; - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_ucast(edev->cdev, &ucast); } static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action) @@ -1057,18 +1055,17 @@ static int qede_set_mcast_rx_mac(struct qede_dev *edev, enum qed_filter_xcast_params_type opcode, unsigned char *mac, int num_macs) { - struct qed_filter_params filter_cmd; 
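/* A sketch of what the qed filter-op split above buys the caller,
 * modelled on the qede helpers in the following hunks; example_* is a
 * hypothetical name and the promiscuous mode constant is assumed from
 * the existing enum qed_filter_rx_mode_type. Each filter kind now has
 * its own typed callback, so callers no longer build a
 * struct qed_filter_params wrapper plus a type enum that the core then
 * re-demultiplexes.
 */
static int example_enable_promisc(struct qede_dev *edev)
{
	/* Previously: filter_cmd.type = QED_FILTER_TYPE_RX_MODE; ... */
	return edev->ops->filter_config_rx_mode(edev->cdev,
						QED_FILTER_RX_MODE_TYPE_PROMISC);
}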
+ struct qed_filter_mcast_params mcast; int i; - memset(&filter_cmd, 0, sizeof(filter_cmd)); - filter_cmd.type = QED_FILTER_TYPE_MCAST; - filter_cmd.filter.mcast.type = opcode; - filter_cmd.filter.mcast.num = num_macs; + memset(&mcast, 0, sizeof(mcast)); + mcast.type = opcode; + mcast.num = num_macs; for (i = 0; i < num_macs; i++, mac += ETH_ALEN) - ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); + ether_addr_copy(mcast.mac[i], mac); - return edev->ops->filter_config(edev->cdev, &filter_cmd); + return edev->ops->filter_config_mcast(edev->cdev, &mcast); } int qede_set_mac_addr(struct net_device *ndev, void *p) @@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev) { enum qed_filter_rx_mode_type accept_flags; struct qede_dev *edev = netdev_priv(ndev); - struct qed_filter_params rx_mode; unsigned char *uc_macs, *temp; struct netdev_hw_addr *ha; int rc, uc_count; @@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev) netif_addr_unlock_bh(ndev); - /* Configure the struct for the Rx mode */ - memset(&rx_mode, 0, sizeof(struct qed_filter_params)); - rx_mode.type = QED_FILTER_TYPE_RX_MODE; - /* Remove all previous unicast secondary macs and multicast macs * (configure / leave the primary mac) */ @@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev) qede_config_accept_any_vlan(edev, false); } - rx_mode.filter.accept_flags = accept_flags; - edev->ops->filter_config(edev->cdev, &rx_mode); + edev->ops->filter_config_rx_mode(edev->cdev, accept_flags); out: kfree(uc_macs); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 9837bdb89cd4..ee4c3bd28a93 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->devlink = qed_ops->common->devlink_register(cdev); if (IS_ERR(edev->devlink)) { DP_NOTICE(edev, "Cannot register devlink\n"); + rc = PTR_ERR(edev->devlink); edev->devlink = NULL; - /* Go on, we can live without devlink */ + goto err3; } } else { struct net_device *ndev = pci_get_drvdata(pdev); + struct qed_devlink *qdl; edev = netdev_priv(ndev); - - if (edev->devlink) { - struct qed_devlink *qdl = devlink_priv(edev->devlink); - - qdl->cdev = cdev; - } + qdl = devlink_priv(edev->devlink); + qdl->cdev = cdev; edev->cdev = cdev; memset(&edev->stats, 0, sizeof(edev->stats)); memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 2728df46ec41..8da4b66b71b5 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -37,7 +37,7 @@ enum mac_version { RTL_GIGA_MAC_VER_24, RTL_GIGA_MAC_VER_25, RTL_GIGA_MAC_VER_26, - RTL_GIGA_MAC_VER_27, + /* support for RTL_GIGA_MAC_VER_27 has been removed */ RTL_GIGA_MAC_VER_28, RTL_GIGA_MAC_VER_29, RTL_GIGA_MAC_VER_30, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 46a6ff9a782d..0199914440ab 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -118,7 +118,6 @@ static const struct { [RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" }, [RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1}, [RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2}, - [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" }, [RTL_GIGA_MAC_VER_29] = 
{"RTL8105e", FIRMWARE_8105E_1}, [RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1}, @@ -985,33 +984,6 @@ DECLARE_RTL_COND(rtl_ocpar_cond) return RTL_R32(tp, OCPAR) & OCPAR_FLAG; } -static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) -{ - RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); - RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); -} - -static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) -{ - r8168dp_1_mdio_access(tp, reg, - OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); -} - -static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) -{ - r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); - - mdelay(1); - RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD); - RTL_W32(tp, EPHY_RXER_NUM, 0); - - return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? - RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT; -} - #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 static void r8168dp_2_mdio_start(struct rtl8169_private *tp) @@ -1053,9 +1025,6 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) static void rtl_writephy(struct rtl8169_private *tp, int location, int val) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - r8168dp_1_mdio_write(tp, location, val); - break; case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); @@ -1072,8 +1041,6 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) static int rtl_readphy(struct rtl8169_private *tp, int location) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - return r8168dp_1_mdio_read(tp, location); case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); @@ -1235,7 +1202,6 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp) static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp) { switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE; @@ -2040,8 +2006,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168DP family. */ /* It seems this early RTL8168dp version never made it to - * the wild. Let's see whether somebody complains, if not - * we'll remove support for this chip version completely. + * the wild. Support has been removed. * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, */ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, @@ -2371,7 +2336,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp) r8168c_hw_jumbo_disable(tp); } break; - case RTL_GIGA_MAC_VER_27 ... 
RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_28: if (jumbo) r8168dp_hw_jumbo_enable(tp); else @@ -3719,7 +3684,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3, [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d, - [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d, [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4, [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1, [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2, @@ -3982,7 +3946,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) goto no_reset; switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 50f0f621b1aa..f7ad5487879b 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -548,64 +548,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp, rtl8168d_apply_firmware_cond(tp, phydev, 0xb300); } -static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) -{ - static const struct phy_reg phy_reg_init[] = { - { 0x1f, 0x0002 }, - { 0x10, 0x0008 }, - { 0x0d, 0x006c }, - - { 0x1f, 0x0000 }, - { 0x0d, 0xf880 }, - - { 0x1f, 0x0001 }, - { 0x17, 0x0cc0 }, - - { 0x1f, 0x0001 }, - { 0x0b, 0xa4d8 }, - { 0x09, 0x281c }, - { 0x07, 0x2883 }, - { 0x0a, 0x6b35 }, - { 0x1d, 0x3da4 }, - { 0x1c, 0xeffd }, - { 0x14, 0x7f52 }, - { 0x18, 0x7fc6 }, - { 0x08, 0x0601 }, - { 0x06, 0x4063 }, - { 0x10, 0xf074 }, - { 0x1f, 0x0003 }, - { 0x13, 0x0789 }, - { 0x12, 0xf4bd }, - { 0x1a, 0x04fd }, - { 0x14, 0x84b0 }, - { 0x1f, 0x0000 }, - { 0x00, 0x9200 }, - - { 0x1f, 0x0005 }, - { 0x01, 0x0340 }, - { 0x1f, 0x0001 }, - { 0x04, 0x4000 }, - { 0x03, 0x1d21 }, - { 0x02, 0x0c32 }, - { 0x01, 0x0200 }, - { 0x00, 0x5554 }, - { 0x04, 0x4800 }, - { 0x04, 0x4000 }, - { 0x04, 0xf000 }, - { 0x03, 0xdf01 }, - { 0x02, 0xdf20 }, - { 0x01, 0x101a }, - { 0x00, 0xa0ff }, - { 0x04, 0xf800 }, - { 0x04, 0xf000 }, - { 0x1f, 0x0000 }, - }; - - rtl_writephy_batch(phydev, phy_reg_init); - r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000); -} - static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1332,7 +1274,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config, [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config, [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config, - [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config, [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config, [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config, [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config, diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 3d1a18a01ce5..be4d772e68cf 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev) /* * Rx and Tx descriptors need 256 bytes alignment. - * pci_alloc_consistent() guarantees a stronger alignment. + * dma_alloc_coherent() guarantees a stronger alignment. 
*/ tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES, &tp->tx_dma, GFP_KERNEL); diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index fdbd2a43e267..3d1176588f7d 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -788,7 +788,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index) PKT_BUF_SZ, DMA_FROM_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { dev_kfree_skb_any(skb); - netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n"); + netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n"); return -ENOMEM; } @@ -940,7 +940,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb, DMA_TO_DEVICE); if (dma_mapping_error(&pd->pdev->dev, mapping)) { netif_warn(pd, tx_err, pd->dev, - "pci_map_single failed, dropping packet\n"); + "dma_map_single failed, dropping packet\n"); return NETDEV_TX_BUSY; } @@ -1551,7 +1551,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!pd->rx_ring) goto out_free_io_4; - /* descriptors are aligned due to the nature of pci_alloc_consistent */ + /* descriptors are aligned due to the nature of dma_alloc_coherent */ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE); pd->tx_dma_addr = pd->rx_dma_addr + sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE; diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 1f46af136aa8..f80a2aef9972 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1860,10 +1860,9 @@ static int netsec_of_probe(struct platform_device *pdev, *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np); priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ - if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "phy_ref_clk not found\n"); - return PTR_ERR(priv->clk); - } + if (IS_ERR(priv->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), + "phy_ref_clk not found\n"); priv->freq = clk_get_rate(priv->clk); return 0; @@ -1886,19 +1885,17 @@ static int netsec_acpi_probe(struct platform_device *pdev, priv->phy_interface = PHY_INTERFACE_MODE_NA; ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); - if (ret) { - dev_err(&pdev->dev, - "missing required property 'phy-channel'\n"); - return ret; - } + if (ret) + return dev_err_probe(&pdev->dev, ret, + "missing required property 'phy-channel'\n"); ret = device_property_read_u32(&pdev->dev, "socionext,phy-clock-frequency", &priv->freq); if (ret) - dev_err(&pdev->dev, - "missing required property 'socionext,phy-clock-frequency'\n"); - return ret; + return dev_err_probe(&pdev->dev, ret, + "missing required property 'socionext,phy-clock-frequency'\n"); + return 0; } static void netsec_unregister_mdio(struct netsec_priv *priv) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index d046e33b8a29..66fc8be34bb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -171,10 +171,9 @@ static int visconti_eth_clock_probe(struct platform_device *pdev, int err; dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(dwmac->phy_ref_clk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - return PTR_ERR(dwmac->phy_ref_clk); - } + if (IS_ERR(dwmac->phy_ref_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk), + "phy_ref_clk clock not found.\n"); err = 
clk_prepare_enable(dwmac->phy_ref_clk); if (err < 0) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 981ccf47dcea..0600a29ae451 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3506,6 +3506,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Rx MSI irq */ for (i = 0; i < priv->plat->rx_queues_to_use; i++) { + if (i >= MTL_MAX_RX_QUEUES) + break; if (priv->rx_irq[i] == 0) continue; @@ -3529,6 +3531,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request Tx MSI irq */ for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + if (i >= MTL_MAX_TX_QUEUES) + break; if (priv->tx_irq[i] == 0) continue; diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 6e4d4f9e32e0..b05de9b61ad6 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item { #define AM65_CPSW_REGDUMP_REC(mod, start, end) { \ .hdr.module_id = (mod), \ - .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \ + .hdr.len = (end + 4 - start) * 2 + \ sizeof(struct am65_cpsw_regdump_hdr), \ .start_ofs = (start), \ .end_ofs = end, \ diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 130346f74ee8..0de5f4a4fe08 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2429,12 +2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dl_priv = devlink_priv(common->devlink); dl_priv->common = common; - ret = devlink_register(common->devlink); - if (ret) { - dev_err(dev, "devlink reg fail ret:%d\n", ret); - goto dl_free; - } - /* Provide devlink hook to switch mode when multiple external ports * are present NUSS switchdev driver is enabled. 
*/ @@ -2447,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) dev_err(dev, "devlink params reg fail ret:%d\n", ret); goto dl_unreg; } - devlink_params_publish(common->devlink); } for (i = 1; i <= common->port_num; i++) { @@ -2468,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) } devlink_port_type_eth_set(dl_port, port->ndev); } - + devlink_register(common->devlink); return ret; dl_port_unreg: @@ -2479,10 +2472,7 @@ dl_port_unreg: devlink_port_unregister(dl_port); } dl_unreg: - devlink_unregister(common->devlink); -dl_free: devlink_free(common->devlink); - return ret; } @@ -2492,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) struct am65_cpsw_port *port; int i; + devlink_unregister(common->devlink); + for (i = 1; i <= common->port_num; i++) { port = am65_common_get_port(common, i); dl_port = &port->devlink_port; @@ -2500,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) } if (!AM65_CPSW_IS_CPSW2G(common) && - IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { - devlink_params_unpublish(common->devlink); - devlink_params_unregister(common->devlink, am65_cpsw_devlink_params, + IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) + devlink_params_unregister(common->devlink, + am65_cpsw_devlink_params, ARRAY_SIZE(am65_cpsw_devlink_params)); - } - devlink_unregister(common->devlink); devlink_free(common->devlink); } diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 7968f24d99c8..1530532748a8 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1810,12 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) dl_priv = devlink_priv(cpsw->devlink); dl_priv->cpsw = cpsw; - ret = devlink_register(cpsw->devlink); - if (ret) { - dev_err(dev, "DL reg fail ret:%d\n", ret); - goto dl_free; - } - ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); if (ret) { @@ -1823,22 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) goto dl_unreg; } - devlink_params_publish(cpsw->devlink); + devlink_register(cpsw->devlink); return ret; dl_unreg: - devlink_unregister(cpsw->devlink); -dl_free: devlink_free(cpsw->devlink); return ret; } static void cpsw_unregister_devlink(struct cpsw_common *cpsw) { - devlink_params_unpublish(cpsw->devlink); + devlink_unregister(cpsw->devlink); devlink_params_unregister(cpsw->devlink, cpsw_devlink_params, ARRAY_SIZE(cpsw_devlink_params)); - devlink_unregister(cpsw->devlink); devlink_free(cpsw->devlink); } diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 6d1e3f49a3d3..5810e8473789 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp) * or read adapter MAC address * * Assumptions: - * Memory allocated from pci_alloc_consistent() call is physically + * Memory allocated from dma_alloc_coherent() call is physically * contiguous, locked memory. * * Side Effects: @@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process( * is contained in a single physically contiguous buffer * in which the virtual address of the start of packet * (skb->data) can be converted to a physical address - * by using pci_map_single(). + * by using dma_map_single(). 
* * Since the adapter architecture requires a three byte * packet request header to prepend the start of packet, @@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, * skb->data. * 6. The physical address of the start of packet * can be determined from the virtual address - * by using pci_map_single() and is only 32-bits + * by using dma_map_single() and is only 32-bits * wide. */ diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c index c5cb421f9890..cc5126ea7ef5 100644 --- a/drivers/net/fddi/skfp/skfddi.c +++ b/drivers/net/fddi/skfp/skfddi.c @@ -1012,7 +1012,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __ * is contained in a single physically contiguous buffer * in which the virtual address of the start of packet * (skb->data) can be converted to a physical address - * by using pci_map_single(). + * by using dma_map_single(). * * We have an internal queue for packets we can not send * immediately. Packets in this queue can be given to the diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 54313bd57797..cb6645012a30 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -1470,10 +1470,6 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_devlink_free; - err = devlink_register(devlink); - if (err) - goto err_resources_unregister; - err = devlink_params_register(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); if (err) @@ -1514,9 +1510,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev) if (err) goto err_psample_exit; - devlink_params_publish(devlink); - devlink_reload_enable(devlink); nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_register(devlink); + devlink_reload_enable(devlink); return 0; err_psample_exit: @@ -1537,8 +1533,6 @@ err_params_unregister: devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); err_dl_unregister: - devlink_unregister(devlink); -err_resources_unregister: devlink_resources_unregister(devlink, NULL); err_devlink_free: devlink_free(devlink); @@ -1573,6 +1567,7 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev) struct devlink *devlink = priv_to_devlink(nsim_dev); devlink_reload_disable(devlink); + devlink_unregister(devlink); nsim_dev_reload_destroy(nsim_dev); @@ -1580,7 +1575,6 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev) nsim_dev_debugfs_exit(nsim_dev); devlink_params_unregister(devlink, nsim_devlink_params, ARRAY_SIZE(nsim_devlink_params)); - devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); devlink_free(devlink); } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index b03a0513eb7e..0ab6a40be611 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -81,6 +81,30 @@ static int nsim_set_ringparam(struct net_device *dev, return 0; } +static void +nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct netdevsim *ns = netdev_priv(dev); + + ch->max_combined = ns->nsim_bus_dev->num_queues; + ch->combined_count = ns->ethtool.channels; +} + +static int +nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct netdevsim *ns = netdev_priv(dev); + int err; + + err = netif_set_real_num_queues(dev, ch->combined_count, + ch->combined_count); + if (err) + return err; + + ns->ethtool.channels = ch->combined_count; + return 0; +} + static int nsim_get_fecparam(struct net_device *dev, struct 
ethtool_fecparam *fecparam) { @@ -118,6 +142,8 @@ static const struct ethtool_ops nsim_ethtool_ops = { .get_coalesce = nsim_get_coalesce, .get_ringparam = nsim_get_ringparam, .set_ringparam = nsim_set_ringparam, + .get_channels = nsim_get_channels, + .set_channels = nsim_set_channels, .get_fecparam = nsim_get_fecparam, .set_fecparam = nsim_set_fecparam, }; @@ -141,6 +167,8 @@ void nsim_ethtool_init(struct netdevsim *ns) ns->ethtool.fec.fec = ETHTOOL_FEC_NONE; ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE; + ns->ethtool.channels = ns->nsim_bus_dev->num_queues; + ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir); debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err); diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index 04aebdf85747..aa77af4a68df 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -110,26 +110,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) if (err) return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_bool_put(fmsg, true); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_u8_put(fmsg, i); - if (err) - return err; - } err = devlink_fmsg_arr_pair_nest_end(fmsg); if (err) return err; @@ -146,18 +126,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len) if (err) return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array"); - if (err) - return err; - for (i = 0; i < 10; i++) { - err = devlink_fmsg_u64_put(fmsg, i); - if (err) - return err; - } - err = devlink_fmsg_arr_pair_nest_end(fmsg); - if (err) - return err; - err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects"); if (err) return err; diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 793c86dc5a9c..d42eec05490f 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -62,6 +62,7 @@ struct nsim_ethtool_pauseparam { struct nsim_ethtool { u32 get_err; u32 set_err; + u32 channels; struct nsim_ethtool_pauseparam pauseparam; struct ethtool_coalesce coalesce; struct ethtool_ringparam ring; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index bdac087058b2..3feee4d59030 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -150,7 +150,8 @@ #define ATH8035_PHY_ID 0x004dd072 #define AT8030_PHY_ID_MASK 0xffffffef -#define QCA8327_PHY_ID 0x004dd034 +#define QCA8327_A_PHY_ID 0x004dd033 +#define QCA8327_B_PHY_ID 0x004dd034 #define QCA8337_PHY_ID 0x004dd036 #define QCA8K_PHY_ID_MASK 0xffffffff @@ -1409,17 +1410,49 @@ static struct phy_driver at803x_driver[] = { .config_aneg = at803x_config_aneg, }, { /* QCA8337 */ - .phy_id = QCA8337_PHY_ID, - .phy_id_mask = QCA8K_PHY_ID_MASK, - .name = "QCA PHY 8337", + .phy_id = QCA8337_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8337 internal PHY", /* PHY_GBIT_FEATURES */ - .probe = at803x_probe, - .flags = PHY_IS_INTERNAL, - .config_init = qca83xx_config_init, - .soft_reset = genphy_soft_reset, - .get_sset_count = at803x_get_sset_count, - .get_strings = at803x_get_strings, - .get_stats = at803x_get_stats, + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + 
.soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, +}, { + /* QCA8327-A from switch QCA8327-AL1A */ + .phy_id = QCA8327_A_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-A internal PHY", + /* PHY_GBIT_FEATURES */ + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + .soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, +}, { + /* QCA8327-B from switch QCA8327-BL1A */ + .phy_id = QCA8327_B_PHY_ID, + .phy_id_mask = QCA8K_PHY_ID_MASK, + .name = "Qualcomm Atheros 8327-B internal PHY", + /* PHY_GBIT_FEATURES */ + .probe = at803x_probe, + .flags = PHY_IS_INTERNAL, + .config_init = qca83xx_config_init, + .soft_reset = genphy_soft_reset, + .get_sset_count = at803x_get_sset_count, + .get_strings = at803x_get_strings, + .get_stats = at803x_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, }, }; module_phy_driver(at803x_driver); @@ -1430,6 +1463,9 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = { { PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) }, { PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) }, + { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) }, { } }; diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 27b6a3f507ae..6ceadd2a0082 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -415,6 +415,190 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev) return bcm7xxx_28nm_ephy_apd_enable(phydev); } +static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev) +{ + int tmp, rcalcode, rcalnewcodelp, rcalnewcode11, rcalnewcode11d2; + + /* Reset PHY */ + tmp = genphy_soft_reset(phydev); + if (tmp) + return tmp; + + /* Reset AFE and PLL */ + bcm_phy_write_exp_sel(phydev, 0x0003, 0x0006); + /* Clear reset */ + bcm_phy_write_exp_sel(phydev, 0x0003, 0x0000); + + /* Write PLL/AFE control register to select 54MHz crystal */ + bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0000); + bcm_phy_write_misc(phydev, 0x0031, 0x0000, 0x044a); + + /* Change Ka,Kp,Ki to pdiv=1 */ + bcm_phy_write_misc(phydev, 0x0033, 0x0002, 0x71a1); + /* Configuration override */ + bcm_phy_write_misc(phydev, 0x0033, 0x0001, 0x8000); + + /* Change PLL_NDIV and PLL_NUDGE */ + bcm_phy_write_misc(phydev, 0x0031, 0x0001, 0x2f68); + bcm_phy_write_misc(phydev, 0x0031, 0x0002, 0x0000); + + /* Reference frequency is 54Mhz, config_mode[15:14] = 3 (low + * phase) + */ + bcm_phy_write_misc(phydev, 0x0030, 0x0003, 0xc036); + + /* Initialize bypass mode */ + bcm_phy_write_misc(phydev, 0x0032, 0x0003, 0x0000); + /* Bypass code, default: VCOCLK enabled */ + bcm_phy_write_misc(phydev, 0x0033, 0x0000, 0x0002); + /* LDOs at default setting */ + bcm_phy_write_misc(phydev, 0x0030, 0x0002, 0x01c0); + /* Release PLL reset */ + bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0001); + + /* Bandgap curvature correction to correct default */ + bcm_phy_write_misc(phydev, 0x0038, 0x0000, 0x0010); + + /* Run RCAL */ + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x0038); + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003b); + udelay(2); + bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003f); + mdelay(5); + + /* AFE_CAL_CONFIG_0, 
Vref=1000, Target=10, averaging enabled */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x1c82); + /* AFE_CAL_CONFIG_0, no reset and analog powerup */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e82); + udelay(2); + /* AFE_CAL_CONFIG_0, start calibration */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f82); + udelay(100); + /* AFE_CAL_CONFIG_0, clear start calibration, set HiBW */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e86); + udelay(2); + /* AFE_CAL_CONFIG_0, start calibration with hi BW mode set */ + bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f86); + udelay(100); + + /* Adjust 10BT amplitude additional +7% and 100BT +2% */ + bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7ea); + /* Adjust 1G mode amplitude and 1G testmode1 */ + bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0); + + /* Read CORE_EXPA9 */ + tmp = bcm_phy_read_exp(phydev, 0x00a9); + /* CORE_EXPA9[6:1] is rcalcode[5:0] */ + rcalcode = (tmp & 0x7e) / 2; + /* Correct RCAL code + 1 is -1% rprogr, LP: +16 */ + rcalnewcodelp = rcalcode + 16; + /* Correct RCAL code + 1 is -15 rprogr, 11: +10 */ + rcalnewcode11 = rcalcode + 10; + /* Saturate if necessary */ + if (rcalnewcodelp > 0x3f) + rcalnewcodelp = 0x3f; + if (rcalnewcode11 > 0x3f) + rcalnewcode11 = 0x3f; + /* REXT=1 BYP=1 RCAL_st1<5:0>=new rcal code */ + tmp = 0x00f8 + rcalnewcodelp * 256; + /* Program into AFE_CAL_CONFIG_2 */ + bcm_phy_write_misc(phydev, 0x0039, 0x0003, tmp); + /* AFE_BIAS_CONFIG_0 10BT bias code (Bias: E4) */ + bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7e4); + /* Invert ADC clock output and ADC refp LDO current to correct + * default + */ + bcm_phy_write_misc(phydev, 0x003b, 0x0000, 0x8002); + /* 100BT staircase, high BW, 1G staircase, alternate encode */ + bcm_phy_write_misc(phydev, 0x003c, 0x0003, 0xf882); + /* 1000BT DAC transition method per Erol, bits[32], DAC Shuffle + * sequence 1 + 10BT imp adjust bits + */ + bcm_phy_write_misc(phydev, 0x003d, 0x0000, 0x3201); + /* Non-overlap fix */ + bcm_phy_write_misc(phydev, 0x003a, 0x0002, 0x0c00); + + /* pwdb override (rxconfig<5>) to turn on RX LDO independent of + * pwdb controls from DSP_TAP10 + */ + bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0020); + + /* Remove references to channel 2 and 3 */ + bcm_phy_write_misc(phydev, 0x003b, 0x0002, 0x0000); + bcm_phy_write_misc(phydev, 0x003b, 0x0003, 0x0000); + + /* Set cal_bypassb bit rxconfig<43> */ + bcm_phy_write_misc(phydev, 0x003a, 0x0003, 0x0800); + udelay(2); + + /* Revert pwdb_override (rxconfig<5>) to 0 so that the RX pwr + * is controlled by DSP.
+ */ + bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0000); + + /* Drop LSB */ + rcalnewcode11d2 = (rcalnewcode11 & 0xfffe) / 2; + tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0001); + /* Clear bits [11:5] */ + tmp &= ~0xfe0; + /* set txcfg_ch0<5>=1 (enable + set local rcal) */ + tmp |= 0x0020 | (rcalnewcode11d2 * 64); + bcm_phy_write_misc(phydev, 0x003d, 0x0001, tmp); + bcm_phy_write_misc(phydev, 0x003d, 0x0002, tmp); + + tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0000); + /* set txcfg<45:44>=11 (enable Rextra + invert fullscaledetect) + */ + tmp &= ~0x3000; + tmp |= 0x3000; + bcm_phy_write_misc(phydev, 0x003d, 0x0000, tmp); + + return 0; +} + +static int bcm7xxx_16nm_ephy_config_init(struct phy_device *phydev) +{ + int ret, val; + + ret = bcm7xxx_16nm_ephy_afe_config(phydev); + if (ret) + return ret; + + ret = bcm_phy_set_eee(phydev, true); + if (ret) + return ret; + + ret = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3); + if (ret < 0) + return ret; + + val = ret; + + /* Auto power down of DLL enabled, + * TXC/RXC disabled during auto power down. + */ + val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS; + val |= BIT(8); + + ret = bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val); + if (ret < 0) + return ret; + + return bcm_phy_enable_apd(phydev, true); +} + +static int bcm7xxx_16nm_ephy_resume(struct phy_device *phydev) +{ + int ret; + + /* Re-apply workarounds coming out of suspend/resume */ + ret = bcm7xxx_16nm_ephy_config_init(phydev); + if (ret) + return ret; + + return genphy_config_aneg(phydev); +} + #define MII_BCM7XXX_REG_INVALID 0xff static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum) @@ -716,9 +900,25 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev) .resume = bcm7xxx_config_init, \ } +#define BCM7XXX_16NM_EPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + /* PHY_BASIC_FEATURES */ \ + .flags = PHY_IS_INTERNAL, \ + .probe = bcm7xxx_28nm_probe, \ + .remove = bcm7xxx_28nm_remove, \ + .config_init = bcm7xxx_16nm_ephy_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .resume = bcm7xxx_16nm_ephy_resume, \ +} + static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"), BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"), + BCM7XXX_16NM_EPHY(PHY_ID_BCM72165, "Broadcom BCM72165"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"), @@ -741,6 +941,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM72113, 0xfffffff0 }, { PHY_ID_BCM72116, 0xfffffff0, }, + { PHY_ID_BCM72165, 0xfffffff0, }, { PHY_ID_BCM7250, 0xfffffff0, }, { PHY_ID_BCM7255, 0xfffffff0, }, { PHY_ID_BCM7260, 0xfffffff0, }, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 83aea5c5cd03..bb5104ae4610 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -392,10 +392,50 @@ static int bcm54xx_config_init(struct phy_device *phydev) return 0; } +static int bcm54xx_iddq_set(struct phy_device *phydev, bool enable) +{ + int ret = 0; + + if (!(phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND)) + return ret; + + ret = bcm_phy_read_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL); + if (ret < 0) + goto out; + + if (enable) + ret |= BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP; + else + ret &= ~(BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP); + + ret = bcm_phy_write_exp(phydev,
BCM54XX_TOP_MISC_IDDQ_CTRL, ret); +out: + return ret; +} + +static int bcm54xx_suspend(struct phy_device *phydev) +{ + int ret; + + /* We cannot use a read/modify/write here otherwise the PHY gets into + * a bad state where its LEDs keep flashing, thus defeating the purpose + * of low power mode. + */ + ret = phy_write(phydev, MII_BMCR, BMCR_PDOWN); + if (ret < 0) + return ret; + + return bcm54xx_iddq_set(phydev, true); +} + static int bcm54xx_resume(struct phy_device *phydev) { int ret; + ret = bcm54xx_iddq_set(phydev, false); + if (ret < 0) + return ret; + /* Writes to register other than BMCR would be ignored * unless we clear the PDOWN bit first */ @@ -408,6 +448,15 @@ static int bcm54xx_resume(struct phy_device *phydev) */ fsleep(40); + /* Issue a soft reset after clearing the power down bit + * and before doing any other configuration. + */ + if (phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND) { + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + } + return bcm54xx_config_init(phydev); } @@ -702,6 +751,36 @@ static void bcm54xx_get_stats(struct phy_device *phydev, bcm_phy_get_stats(phydev, priv->stats, stats, data); } +static void bcm54xx_link_change_notify(struct phy_device *phydev) +{ + u16 mask = MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE | + MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE; + int ret; + + if (phydev->state != PHY_RUNNING) + return; + + /* Don't change the DAC wake settings if auto power down + * is not requested. + */ + if (!(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE)) + return; + + ret = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP08); + if (ret < 0) + return; + + /* Enable/disable 10BaseT auto and forced early DAC wake depending + * on the negotiated speed, those settings should only be done + * for 10Mbits/sec. + */ + if (phydev->speed == SPEED_10) + ret |= mask; + else + ret &= ~mask; + bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret); +} + static struct phy_driver broadcom_drivers[] = { { .phy_id = PHY_ID_BCM5411, @@ -715,6 +794,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5421, .phy_id_mask = 0xfffffff0, @@ -727,6 +807,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54210E, .phy_id_mask = 0xfffffff0, @@ -739,6 +820,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM5461, .phy_id_mask = 0xfffffff0, @@ -751,6 +835,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54612E, .phy_id_mask = 0xfffffff0, @@ -763,6 +848,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54616S, .phy_id_mask = 0xfffffff0, @@ -774,6 +860,7 @@ static 
struct phy_driver broadcom_drivers[] = { .handle_interrupt = bcm_phy_handle_interrupt, .read_status = bcm54616s_read_status, .probe = bcm54616s_probe, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5464, .phy_id_mask = 0xfffffff0, @@ -788,6 +875,7 @@ static struct phy_driver broadcom_drivers[] = { .handle_interrupt = bcm_phy_handle_interrupt, .suspend = genphy_suspend, .resume = genphy_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5481, .phy_id_mask = 0xfffffff0, @@ -801,6 +889,7 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54810, .phy_id_mask = 0xfffffff0, @@ -814,8 +903,9 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, - .suspend = genphy_suspend, + .suspend = bcm54xx_suspend, .resume = bcm54xx_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM54811, .phy_id_mask = 0xfffffff0, @@ -829,8 +919,9 @@ static struct phy_driver broadcom_drivers[] = { .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, - .suspend = genphy_suspend, + .suspend = bcm54xx_suspend, .resume = bcm54xx_resume, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM5482, .phy_id_mask = 0xfffffff0, @@ -843,6 +934,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM50610, .phy_id_mask = 0xfffffff0, @@ -855,6 +947,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM50610M, .phy_id_mask = 0xfffffff0, @@ -867,6 +962,9 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, + .suspend = bcm54xx_suspend, + .resume = bcm54xx_resume, }, { .phy_id = PHY_ID_BCM57780, .phy_id_mask = 0xfffffff0, @@ -879,6 +977,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCMAC131, .phy_id_mask = 0xfffffff0, @@ -905,6 +1004,7 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM53125, .phy_id_mask = 0xfffffff0, @@ -918,6 +1018,7 @@ static struct phy_driver broadcom_drivers[] = { .config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, }, { .phy_id = PHY_ID_BCM89610, .phy_id_mask = 0xfffffff0, @@ -930,6 +1031,7 @@ static struct phy_driver broadcom_drivers[] = { 
.config_init = bcm54xx_config_init, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .link_change_notify = bcm54xx_link_change_notify, } }; module_phy_driver(broadcom_drivers); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bd310e8d5e43..b6fea119fe13 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -22,6 +22,7 @@ * If both the fiber and copper ports are connected, the first to gain * link takes priority and the other port is completely locked out. */ +#include <linux/bitfield.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/hwmon.h> @@ -33,6 +34,8 @@ #define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe #define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa) +#define MV_VERSION(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d)) + enum { MV_PMA_FW_VER0 = 0xc011, MV_PMA_FW_VER1 = 0xc012, @@ -62,6 +65,15 @@ enum { MV_PCS_CSCR1_MDIX_MDIX = 0x0020, MV_PCS_CSCR1_MDIX_AUTO = 0x0060, + MV_PCS_DSC1 = 0x8003, + MV_PCS_DSC1_ENABLE = BIT(9), + MV_PCS_DSC1_10GBT = 0x01c0, + MV_PCS_DSC1_1GBR = 0x0038, + MV_PCS_DSC1_100BTX = 0x0007, + MV_PCS_DSC2 = 0x8004, + MV_PCS_DSC2_2P5G = 0xf000, + MV_PCS_DSC2_5G = 0x0f00, + MV_PCS_CSSR1 = 0x8008, MV_PCS_CSSR1_SPD1_MASK = 0xc000, MV_PCS_CSSR1_SPD1_SPD2 = 0xc000, @@ -125,6 +137,7 @@ enum { }; struct mv3310_chip { + bool (*has_downshift)(struct phy_device *phydev); void (*init_supported_interfaces)(unsigned long *mask); int (*get_mactype)(struct phy_device *phydev); int (*init_interface)(struct phy_device *phydev, int mactype); @@ -138,6 +151,7 @@ struct mv3310_priv { DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX); u32 firmware_ver; + bool has_downshift; bool rate_match; phy_interface_t const_interface; @@ -330,6 +344,71 @@ static int mv3310_reset(struct phy_device *phydev, u32 unit) 5000, 100000, true); } +static int mv3310_get_downshift(struct phy_device *phydev, u8 *ds) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + int val; + + if (!priv->has_downshift) + return -EOPNOTSUPP; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1); + if (val < 0) + return val; + + if (val & MV_PCS_DSC1_ENABLE) + /* assume that all fields are the same */ + *ds = 1 + FIELD_GET(MV_PCS_DSC1_10GBT, (u16)val); + else + *ds = DOWNSHIFT_DEV_DISABLE; + + return 0; +} + +static int mv3310_set_downshift(struct phy_device *phydev, u8 ds) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + u16 val; + int err; + + if (!priv->has_downshift) + return -EOPNOTSUPP; + + if (ds == DOWNSHIFT_DEV_DISABLE) + return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1, + MV_PCS_DSC1_ENABLE); + + /* DOWNSHIFT_DEV_DEFAULT_COUNT is confusing. It looks like it should + * set the default settings for the PHY. However, it is used for + * "ethtool --set-phy-tunable ethN downshift on". The intention is + * to enable downshift at a default number of retries. The default + * settings for 88x3310 are for two retries with downshift disabled. + * So let's use two retries with downshift enabled. 
+ */ + if (ds == DOWNSHIFT_DEV_DEFAULT_COUNT) + ds = 2; + + if (ds > 8) + return -E2BIG; + + ds -= 1; + val = FIELD_PREP(MV_PCS_DSC2_2P5G, ds); + val |= FIELD_PREP(MV_PCS_DSC2_5G, ds); + err = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC2, + MV_PCS_DSC2_2P5G | MV_PCS_DSC2_5G, val); + if (err < 0) + return err; + + val = MV_PCS_DSC1_ENABLE; + val |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds); + val |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds); + val |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds); + + return phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1, + MV_PCS_DSC1_ENABLE | MV_PCS_DSC1_10GBT | + MV_PCS_DSC1_1GBR | MV_PCS_DSC1_100BTX, val); +} + static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd) { int val; @@ -448,6 +527,9 @@ static int mv3310_probe(struct phy_device *phydev) priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255, (priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255); + if (chip->has_downshift) + priv->has_downshift = chip->has_downshift(phydev); + /* Powering down the port when not in use saves about 600mW */ ret = mv3310_power_down(phydev); if (ret) @@ -616,7 +698,16 @@ static int mv3310_config_init(struct phy_device *phydev) } /* Enable EDPD mode - saving 600mW */ - return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); + err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); + if (err) + return err; + + /* Allow downshift */ + err = mv3310_set_downshift(phydev, DOWNSHIFT_DEV_DEFAULT_COUNT); + if (err && err != -EOPNOTSUPP) + return err; + + return 0; } static int mv3310_get_features(struct phy_device *phydev) @@ -886,6 +977,8 @@ static int mv3310_get_tunable(struct phy_device *phydev, struct ethtool_tunable *tuna, void *data) { switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return mv3310_get_downshift(phydev, data); case ETHTOOL_PHY_EDPD: return mv3310_get_edpd(phydev, data); default: @@ -897,6 +990,8 @@ static int mv3310_set_tunable(struct phy_device *phydev, struct ethtool_tunable *tuna, const void *data) { switch (tuna->id) { + case ETHTOOL_PHY_DOWNSHIFT: + return mv3310_set_downshift(phydev, *(u8 *)data); case ETHTOOL_PHY_EDPD: return mv3310_set_edpd(phydev, *(u16 *)data); default: @@ -904,6 +999,14 @@ static int mv3310_set_tunable(struct phy_device *phydev, } } +static bool mv3310_has_downshift(struct phy_device *phydev) +{ + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + + /* Fails to downshift with firmware older than v0.3.5.0 */ + return priv->firmware_ver >= MV_VERSION(0,3,5,0); +} + static void mv3310_init_supported_interfaces(unsigned long *mask) { __set_bit(PHY_INTERFACE_MODE_SGMII, mask); @@ -943,6 +1046,7 @@ static void mv2111_init_supported_interfaces(unsigned long *mask) } static const struct mv3310_chip mv3310_type = { + .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3310_init_supported_interfaces, .get_mactype = mv3310_get_mactype, .init_interface = mv3310_init_interface, @@ -953,6 +1057,7 @@ static const struct mv3310_chip mv3310_type = { }; static const struct mv3310_chip mv3340_type = { + .has_downshift = mv3310_has_downshift, .init_supported_interfaces = mv3340_init_supported_interfaces, .get_mactype = mv3310_get_mactype, .init_interface = mv3340_init_interface, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5c928f827173..c330a5a9f665 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1537,6 +1537,65 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev, return ret; } +#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16 +#define 
LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17 +#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000 + +#define LAN8804_ALIGN_SWAP 0x4a +#define LAN8804_ALIGN_TX_A_B_SWAP 0x1 +#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0) +#define LAN8814_CLOCK_MANAGEMENT 0xd +#define LAN8814_LINK_QUALITY 0x8e + +static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr) +{ + u32 data; + + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA); + + return data; +} + +static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr, + u16 val) +{ + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr); + phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, + (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC)); + + val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val); + if (val) { + phydev_err(phydev, "Error: phy_write has returned error %d\n", + val); + return val; + } + return 0; +} + +static int lan8804_config_init(struct phy_device *phydev) +{ + int val; + + /* MDI-X setting for swap A,B transmit */ + val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP); + val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK; + val |= LAN8804_ALIGN_TX_A_B_SWAP; + lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val); + + /* Make sure that the PHY will not stop generating the clock when the + * link partner goes down + */ + lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e); + lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY); + + return 0; +} + static struct phy_driver ksphy_driver[] = { { .phy_id = PHY_ID_KS8737, @@ -1719,6 +1778,20 @@ static struct phy_driver ksphy_driver[] = { .suspend = genphy_suspend, .resume = kszphy_resume, }, { + .phy_id = PHY_ID_LAN8804, + .phy_id_mask = MICREL_PHY_ID_MASK, + .name = "Microchip LAN966X Gigabit PHY", + .config_init = lan8804_config_init, + .driver_data = &ksz9021_type, + .probe = kszphy_probe, + .soft_reset = genphy_soft_reset, + .read_status = ksz9031_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = kszphy_resume, +}, { .phy_id = PHY_ID_KSZ9131, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", @@ -1794,6 +1867,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK }, { PHY_ID_LAN8814, MICREL_PHY_ID_MASK }, + { PHY_ID_LAN8804, MICREL_PHY_ID_MASK }, { } }; diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0a0abe8e4be0..5a58c77d0002 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1333,7 +1333,10 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) * but one would hope all packets have been sent. This * also means phylink_resolve() will do nothing. */ - netif_carrier_off(pl->netdev); + if (pl->netdev) + netif_carrier_off(pl->netdev); + else + pl->old_link_state = false; /* We do not call mac_link_down() here as we want the * link to remain up to receive the WoL packets. 
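Note on the marvell10g downshift hunks above: mv3310_set_downshift() stores the requested retry count minus one in each per-speed field of the DSC1/DSC2 registers, and mv3310_get_downshift() adds the one back after extracting a single field, since all fields are programmed identically. The stand-alone sketch below models only that encoding. The field masks are copied from the hunk; field_shift(), FIELD_PREP() and FIELD_GET() are local stand-ins for the kernel's <linux/bitfield.h> helpers, and the user-space framing is illustrative rather than driver code.

#include <stdint.h>
#include <stdio.h>

/* Field masks copied from the MV_PCS_DSC1 definitions in the hunk. */
#define MV_PCS_DSC1_ENABLE	(1u << 9)
#define MV_PCS_DSC1_10GBT	0x01c0u	/* bits 8:6 */
#define MV_PCS_DSC1_1GBR	0x0038u	/* bits 5:3 */
#define MV_PCS_DSC1_100BTX	0x0007u	/* bits 2:0 */

/* Local stand-ins for the kernel's FIELD_PREP()/FIELD_GET(). */
static unsigned int field_shift(unsigned int mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

#define FIELD_PREP(mask, val)	(((val) << field_shift(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> field_shift(mask))

int main(void)
{
	unsigned int ds = 2;	/* retries requested via the ethtool tunable */
	uint16_t reg;

	/* Encode: the hardware stores "retries - 1" in each per-speed
	 * field, mirroring the "ds -= 1" in mv3310_set_downshift().
	 */
	reg = MV_PCS_DSC1_ENABLE;
	reg |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds - 1);
	reg |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds - 1);
	reg |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds - 1);
	printf("DSC1 = 0x%04x\n", (unsigned int)reg);

	/* Decode, mirroring mv3310_get_downshift(): all fields are
	 * written identically, so reading one field back is enough.
	 */
	if (reg & MV_PCS_DSC1_ENABLE)
		printf("downshift after %u retries\n",
		       1 + FIELD_GET(MV_PCS_DSC1_10GBT, reg));
	return 0;
}

For the default of two retries (DOWNSHIFT_DEV_DEFAULT_COUNT) this prints DSC1 = 0x0249, the same field values the driver programs through phy_modify_mmd().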
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 79bd2585ec6b..2ed49884565f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -80,6 +80,7 @@ struct virtnet_sq_stats { u64 xdp_tx; u64 xdp_tx_drops; u64 kicks; + u64 tx_timeouts; }; struct virtnet_rq_stats { @@ -103,6 +104,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, { "kicks", VIRTNET_SQ_STAT(kicks) }, + { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, }; static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { @@ -1860,7 +1862,7 @@ static void virtnet_stats(struct net_device *dev, int i; for (i = 0; i < vi->max_queue_pairs; i++) { - u64 tpackets, tbytes, rpackets, rbytes, rdrops; + u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops; struct receive_queue *rq = &vi->rq[i]; struct send_queue *sq = &vi->sq[i]; @@ -1868,6 +1870,7 @@ static void virtnet_stats(struct net_device *dev, start = u64_stats_fetch_begin_irq(&sq->stats.syncp); tpackets = sq->stats.packets; tbytes = sq->stats.bytes; + terrors = sq->stats.tx_timeouts; } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start)); do { @@ -1882,6 +1885,7 @@ static void virtnet_stats(struct net_device *dev, tot->rx_bytes += rbytes; tot->tx_bytes += tbytes; tot->rx_dropped += rdrops; + tot->tx_errors += terrors; } tot->tx_dropped = dev->stats.tx_dropped; @@ -2534,8 +2538,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* XDP requires extra queues for XDP_TX */ if (curr_qp + xdp_qp > vi->max_queue_pairs) { - netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", - curr_qp + xdp_qp, vi->max_queue_pairs); + netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n", + curr_qp + xdp_qp, vi->max_queue_pairs); xdp_qp = 0; } @@ -2663,6 +2667,21 @@ static int virtnet_set_features(struct net_device *dev, return 0; } +static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct virtnet_info *priv = netdev_priv(dev); + struct send_queue *sq = &priv->sq[txqueue]; + struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue); + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.tx_timeouts++; + u64_stats_update_end(&sq->stats.syncp); + + netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n", + txqueue, sq->name, sq->vq->index, sq->vq->name, + jiffies_to_usecs(jiffies - txq->trans_start)); +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -2678,6 +2697,7 @@ static const struct net_device_ops virtnet_netdev = { .ndo_features_check = passthru_features_check, .ndo_get_phys_port_name = virtnet_get_phys_port_name, .ndo_set_features = virtnet_set_features, + .ndo_tx_timeout = virtnet_tx_timeout, }; static void virtnet_config_changed_work(struct work_struct *work) diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 77dbfc418bce..17543be14665 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -71,6 +71,7 @@ config RPMSG_WWAN_CTRL config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" depends on INTEL_IOMMU + select NET_DEVLINK help This driver enables Intel M.2 WWAN Device communication. 
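Note on the virtnet_stats() hunk above: the new tx_timeouts counter is sampled inside the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop, a seqcount scheme where the writer bumps a sequence number around each update and the reader retries whenever the number was odd (update in flight) or changed between reads. The sketch below is a simplified, single-writer C11 model of that pattern; the struct name and the memory ordering are illustrative simplifications, not the kernel's u64_stats implementation, which also handles 32-bit builds and interrupt context.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative per-queue stats block; "seq" plays the role of the
 * kernel's u64_stats_sync member.
 */
struct sq_stats {
	atomic_uint seq;	/* even: stable, odd: update in flight */
	uint64_t packets;
	uint64_t bytes;
	uint64_t tx_timeouts;
};

/* Writer side, cf. the u64_stats_update_begin()/_end() pair around the
 * tx_timeouts bump in virtnet_tx_timeout().
 */
static void stats_tx_timeout(struct sq_stats *s)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
	s->tx_timeouts++;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}

/* Reader side, cf. the do/while loop in virtnet_stats(): retry while a
 * write was in flight (odd) or completed in between (changed).
 */
static void stats_read(struct sq_stats *s, uint64_t *tpackets,
		       uint64_t *tbytes, uint64_t *terrors)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*tpackets = s->packets;
		*tbytes = s->bytes;
		*terrors = s->tx_timeouts;
	} while ((start & 1) ||
		 atomic_load_explicit(&s->seq, memory_order_acquire) != start);
}

int main(void)
{
	struct sq_stats s = { 0 };
	uint64_t tpackets, tbytes, terrors;

	stats_tx_timeout(&s);
	stats_read(&s, &tpackets, &tbytes, &terrors);
	printf("tx_timeouts (-> tx_errors): %llu\n",
	       (unsigned long long)terrors);
	return 0;
}

In the driver the same snapshot loop runs once per queue pair, and the per-queue tx_timeouts totals are accumulated into tot->tx_errors.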
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile index 4f9f0ae398e1..b838034bb120 100644 --- a/drivers/net/wwan/iosm/Makefile +++ b/drivers/net/wwan/iosm/Makefile @@ -18,6 +18,9 @@ iosm-y = \ iosm_ipc_protocol.o \ iosm_ipc_protocol_ops.o \ iosm_ipc_mux.o \ - iosm_ipc_mux_codec.o + iosm_ipc_mux_codec.o \ + iosm_ipc_devlink.o \ + iosm_ipc_flash.o \ + iosm_ipc_coredump.o obj-$(CONFIG_IOSM) := iosm.o diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c index 519361ec40df..128c999e08bb 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c @@ -8,7 +8,7 @@ #include "iosm_ipc_chnl_cfg.h" /* Max. sizes of a downlink buffers */ -#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024) +#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024) #define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024) #define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048 #define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024) @@ -60,6 +60,10 @@ static struct ipc_chnl_cfg modem_cfg[] = { { IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13, IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM }, + /* Flash Channel/Coredump Channel */ + { IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1, + IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL, + IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN }, }; int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index) diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h index 422471367f78..e77084e76718 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h +++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h @@ -23,6 +23,7 @@ enum ipc_channel_id { IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_CTRL_CHL_ID_6, + IPC_MEM_CTRL_CHL_ID_7, }; /** diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c new file mode 100644 index 000000000000..9acd87724c9d --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include "iosm_ipc_coredump.h" + +/** + * ipc_coredump_collect - To collect coredump + * @devlink: Pointer to devlink instance. + * @data: Pointer to snapshot + * @entry: ID of requested snapshot + * @region_size: Region size + * + * Returns: 0 on success, error on failure + */ +int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry, + u32 region_size) +{ + int ret, bytes_to_read, bytes_read = 0, i = 0; + s32 remaining; + u8 *data_ptr; + + data_ptr = vmalloc(region_size); + if (!data_ptr) + return -ENOMEM; + + remaining = devlink->cd_file_info[entry].actual_size; + ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry); + if (ret) { + dev_err(devlink->dev, "Send coredump_get cmd failed"); + goto get_cd_fail; + } + while (remaining > 0) { + bytes_to_read = min(remaining, MAX_DATA_SIZE); + bytes_read = 0; + ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i, + bytes_to_read, &bytes_read); + if (ret) { + dev_err(devlink->dev, "CD data read failed"); + goto get_cd_fail; + } + remaining -= bytes_read; + i += bytes_read; + } + + *data = data_ptr; + + return 0; + +get_cd_fail: + vfree(data_ptr); + return ret; +} + +/** + * ipc_coredump_get_list - Get coredump list from modem + * @devlink: Pointer to devlink instance. 
+ * @cmd: RPSI command to be sent + * + * Returns: 0 on success, error on failure + */ +int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd) +{ + u32 byte_read, num_entries, file_size; + struct iosm_cd_table *cd_table; + u8 size[MAX_SIZE_LEN], i; + char *filename; + int ret; + + cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL); + if (!cd_table) { + ret = -ENOMEM; + goto cd_init_fail; + } + + ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE); + if (ret) { + dev_err(devlink->dev, "rpsi_cmd_coredump_start failed"); + goto cd_init_fail; + } + + ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table, + MAX_CD_LIST_SIZE, &byte_read); + if (ret) { + dev_err(devlink->dev, "Coredump data is invalid"); + goto cd_init_fail; + } + + if (byte_read != MAX_CD_LIST_SIZE) + goto cd_init_fail; + + if (cmd == rpsi_cmd_coredump_start) { + num_entries = le32_to_cpu(cd_table->list.num_entries); + if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) { + ret = -EINVAL; + goto cd_init_fail; + } + + for (i = 0; i < num_entries; i++) { + file_size = le32_to_cpu(cd_table->list.entry[i].size); + filename = cd_table->list.entry[i].filename; + + if (file_size > devlink->cd_file_info[i].default_size) { + ret = -EINVAL; + goto cd_init_fail; + } + + devlink->cd_file_info[i].actual_size = file_size; + dev_dbg(devlink->dev, "file: %s actual size %d", + filename, file_size); + devlink_flash_update_status_notify(devlink->devlink_ctx, + filename, + "FILENAME", 0, 0); + snprintf(size, sizeof(size), "%d", file_size); + devlink_flash_update_status_notify(devlink->devlink_ctx, + size, "FILE SIZE", + 0, 0); + } + } + +cd_init_fail: + kfree(cd_table); + return ret; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h new file mode 100644 index 000000000000..0809ba664276 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef _IOSM_IPC_COREDUMP_H_ +#define _IOSM_IPC_COREDUMP_H_ + +#include "iosm_ipc_devlink.h" + +/* Max number of bytes to receive for Coredump list structure */ +#define MAX_CD_LIST_SIZE 0x1000 + +/* Max buffer allocated to receive coredump data */ +#define MAX_DATA_SIZE 0x00010000 + +/* Max number of file entries */ +#define MAX_NOF_ENTRY 256 + +/* Max length */ +#define MAX_SIZE_LEN 32 + +/** + * struct iosm_cd_list_entry - Structure to hold coredump file info. + * @size: Number of bytes for the entry + * @filename: Coredump filename to be generated on host + */ +struct iosm_cd_list_entry { + __le32 size; + char filename[IOSM_MAX_FILENAME_LEN]; +} __packed; + +/** + * struct iosm_cd_list - Structure to hold list of coredump files + * to be collected. 
+ * @num_entries: Number of entries to be received + * @entry: Contains File info + */ +struct iosm_cd_list { + __le32 num_entries; + struct iosm_cd_list_entry entry[MAX_NOF_ENTRY]; +} __packed; + +/** + * struct iosm_cd_table - Common Coredump table + * @version: Version of coredump structure + * @list: Coredump list structure + */ +struct iosm_cd_table { + __le32 version; + struct iosm_cd_list list; +} __packed; + +int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry, + u32 region_size); + +int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd); + +#endif /* _IOSM_IPC_COREDUMP_H_ */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c new file mode 100644 index 000000000000..6fe56f73011b --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_coredump.h" +#include "iosm_ipc_devlink.h" +#include "iosm_ipc_flash.h" + +/* Coredump list */ +static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = { + {"report.json", REPORT_JSON_SIZE,}, + {"coredump.fcd", COREDUMP_FCD_SIZE,}, + {"cdd.log", CDD_LOG_SIZE,}, + {"eeprom.bin", EEPROM_BIN_SIZE,}, + {"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,}, + {"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,}, +}; + +/* Get the param values for the specific param IDs */ +static int ipc_devlink_get_param(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + int rc = 0; + + switch (id) { + case IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: + ctx->val.vu8 = ipc_devlink->param.erase_full_flash; + break; + + case IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION: + ctx->val.vu8 = ipc_devlink->param.download_region; + break; + + case IOSM_DEVLINK_PARAM_ID_ADDRESS: + ctx->val.vu32 = ipc_devlink->param.address; + break; + + case IOSM_DEVLINK_PARAM_ID_REGION_COUNT: + ctx->val.vu8 = ipc_devlink->param.region_count; + break; + + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} + +/* Set the param values for the specific param IDs */ +static int ipc_devlink_set_param(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + int rc = 0; + + switch (id) { + case IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: + ipc_devlink->param.erase_full_flash = ctx->val.vu8; + break; + + case IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION: + ipc_devlink->param.download_region = ctx->val.vu8; + break; + + case IOSM_DEVLINK_PARAM_ID_ADDRESS: + ipc_devlink->param.address = ctx->val.vu32; + break; + + case IOSM_DEVLINK_PARAM_ID_REGION_COUNT: + ipc_devlink->param.region_count = ctx->val.vu8; + break; + + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} + +/* Devlink param structure array */ +static const struct devlink_param iosm_devlink_params[] = { + DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH, + "erase_full_flash", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ipc_devlink_get_param, ipc_devlink_set_param, + NULL), + DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION, + "download_region", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ipc_devlink_get_param, ipc_devlink_set_param, + NULL), + DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ADDRESS, + "address", DEVLINK_PARAM_TYPE_U32, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ipc_devlink_get_param,
ipc_devlink_set_param, + NULL), + DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_REGION_COUNT, + "region_count", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ipc_devlink_get_param, ipc_devlink_set_param, + NULL), +}; + +/* Get devlink flash component type */ +static enum iosm_flash_comp_type +ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len) +{ + enum iosm_flash_comp_type fls_type; + + if (!strncmp("PSI", comp_str, len)) + fls_type = FLASH_COMP_TYPE_PSI; + else if (!strncmp("EBL", comp_str, len)) + fls_type = FLASH_COMP_TYPE_EBL; + else if (!strncmp("FLS", comp_str, len)) + fls_type = FLASH_COMP_TYPE_FLS; + else + fls_type = FLASH_COMP_TYPE_INVAL; + + return fls_type; +} + +/* Function triggered on devlink flash command + * Flash update function which calls multiple functions based on + * component type specified in the flash command + */ +static int ipc_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(devlink); + enum iosm_flash_comp_type fls_type; + int rc = -EINVAL; + u8 *mdm_rsp; + + if (!params->component) + return -EINVAL; + + mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL); + if (!mdm_rsp) + return -ENOMEM; + + fls_type = ipc_devlink_get_flash_comp_type(params->component, + strlen(params->component)); + + switch (fls_type) { + case FLASH_COMP_TYPE_PSI: + rc = ipc_flash_boot_psi(ipc_devlink, params->fw); + break; + case FLASH_COMP_TYPE_EBL: + rc = ipc_flash_boot_ebl(ipc_devlink, params->fw); + if (rc) + break; + rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp); + if (rc) + break; + rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp); + break; + case FLASH_COMP_TYPE_FLS: + rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp); + break; + default: + devlink_flash_update_status_notify(devlink, "Invalid component", + params->component, 0, 0); + break; + } + + if (!rc) + devlink_flash_update_status_notify(devlink, "Flashing success", + params->component, 0, 0); + else + devlink_flash_update_status_notify(devlink, "Flashing failed", + params->component, 0, 0); + + kfree(mdm_rsp); + return rc; +} + +/* Call back function for devlink ops */ +static const struct devlink_ops devlink_flash_ops = { + .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT, + .flash_update = ipc_devlink_flash_update, +}; + +/** + * ipc_devlink_send_cmd - Send command to Modem + * @ipc_devlink: Pointer to struct iosm_devlink + * @cmd: Command to be sent to modem + * @entry: Command entry number + * + * Returns: 0 on success and failure value on error + */ +int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry) +{ + struct iosm_rpsi_cmd rpsi_cmd; + + rpsi_cmd.param.dword = cpu_to_le32(entry); + rpsi_cmd.cmd = cpu_to_le16(cmd); + rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^ + rpsi_cmd.cmd; + + return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd, + sizeof(rpsi_cmd)); +} + +/* Function to create snapshot */ +static int ipc_devlink_coredump_snapshot(struct devlink *dl, + const struct devlink_region_ops *ops, + struct netlink_ext_ack *extack, + u8 **data) +{ + struct iosm_devlink *ipc_devlink = devlink_priv(dl); + struct iosm_coredump_file_info *cd_list = ops->priv; + u32 region_size; + int rc; + + dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name, + cd_list->entry); + region_size = cd_list->default_size; + rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry, + 
region_size); + if (rc) { + dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc); + goto coredump_collect_err; + } + + /* Send coredump end cmd indicating end of coredump collection */ + if (cd_list->entry == (IOSM_NOF_CD_REGION - 1)) + ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end); + + return 0; + +coredump_collect_err: + ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end); + return rc; +} + +/* To create regions for coredump files */ +static int ipc_devlink_create_region(struct iosm_devlink *devlink) +{ + struct devlink_region_ops *mdm_coredump; + int rc = 0; + int i; + + mdm_coredump = devlink->iosm_devlink_mdm_coredump; + for (i = 0; i < IOSM_NOF_CD_REGION; i++) { + mdm_coredump[i].name = list[i].filename; + mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot; + mdm_coredump[i].destructor = vfree; + devlink->cd_regions[i] = + devlink_region_create(devlink->devlink_ctx, + &mdm_coredump[i], MAX_SNAPSHOTS, + list[i].default_size); + + if (IS_ERR(devlink->cd_regions[i])) { + rc = PTR_ERR(devlink->cd_regions[i]); + dev_err(devlink->dev, "Devlink region fail,err %d", rc); + /* Delete previously created regions */ + for ( ; i >= 0; i--) + devlink_region_destroy(devlink->cd_regions[i]); + goto region_create_fail; + } + list[i].entry = i; + mdm_coredump[i].priv = list + i; + } +region_create_fail: + return rc; +} + +/* To destroy devlink regions */ +static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink) +{ + u8 i; + + for (i = 0; i < IOSM_NOF_CD_REGION; i++) + devlink_region_destroy(ipc_devlink->cd_regions[i]); +} + +/** + * ipc_devlink_init - Initialize/register devlink to IOSM driver + * @ipc_imem: Pointer to struct iosm_imem + * + * Returns: Pointer to iosm_devlink on success and NULL on failure + */ +struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem) +{ + struct ipc_chnl_cfg chnl_cfg_flash = { 0 }; + struct iosm_devlink *ipc_devlink; + struct devlink *devlink_ctx; + int rc; + + devlink_ctx = devlink_alloc(&devlink_flash_ops, + sizeof(struct iosm_devlink), + ipc_imem->dev); + if (!devlink_ctx) { + dev_err(ipc_imem->dev, "devlink_alloc failed"); + goto devlink_alloc_fail; + } + + ipc_devlink = devlink_priv(devlink_ctx); + ipc_devlink->devlink_ctx = devlink_ctx; + ipc_devlink->pcie = ipc_imem->pcie; + ipc_devlink->dev = ipc_imem->dev; + + rc = devlink_params_register(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); + if (rc) { + dev_err(ipc_devlink->dev, + "devlink_params_register failed. rc %d", rc); + goto param_reg_fail; + } + + ipc_devlink->cd_file_info = list; + + rc = ipc_devlink_create_region(ipc_devlink); + if (rc) { + dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d", + rc); + goto region_create_fail; + } + + if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0) + goto chnl_get_fail; + + ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL, + chnl_cfg_flash, IRQ_MOD_OFF); + + init_completion(&ipc_devlink->devlink_sio.read_sem); + skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list); + + devlink_register(devlink_ctx); + dev_dbg(ipc_devlink->dev, "iosm devlink register success"); + + return ipc_devlink; + +chnl_get_fail: + ipc_devlink_destroy_region(ipc_devlink); +region_create_fail: + devlink_params_unregister(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); +param_reg_fail: + devlink_free(devlink_ctx); +devlink_alloc_fail: + return NULL; +} + +/** + * ipc_devlink_deinit - To uninitialize the devlink from IOSM driver.
+ * @ipc_devlink: Devlink instance + */ +void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink) +{ + struct devlink *devlink_ctx = ipc_devlink->devlink_ctx; + + devlink_unregister(devlink_ctx); + ipc_devlink_destroy_region(ipc_devlink); + devlink_params_unregister(devlink_ctx, iosm_devlink_params, + ARRAY_SIZE(iosm_devlink_params)); + if (ipc_devlink->devlink_sio.devlink_read_pend) { + complete(&ipc_devlink->devlink_sio.read_sem); + complete(&ipc_devlink->devlink_sio.channel->ul_sem); + } + if (!ipc_devlink->devlink_sio.devlink_read_pend) + skb_queue_purge(&ipc_devlink->devlink_sio.rx_list); + + ipc_imem_sys_devlink_close(ipc_devlink); + devlink_free(devlink_ctx); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h new file mode 100644 index 000000000000..fa2b388a2f8a --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. + */ + +#ifndef _IOSM_IPC_DEVLINK_H_ +#define _IOSM_IPC_DEVLINK_H_ + +#include <net/devlink.h> + +#include "iosm_ipc_imem.h" +#include "iosm_ipc_imem_ops.h" +#include "iosm_ipc_pcie.h" + +/* MAX file name length */ +#define IOSM_MAX_FILENAME_LEN 32 +/* EBL response size */ +#define IOSM_EBL_RSP_SIZE 76 +/* MAX number of regions supported */ +#define IOSM_NOF_CD_REGION 6 +/* MAX number of SNAPSHOTS supported */ +#define MAX_SNAPSHOTS 1 +/* Default Coredump file size */ +#define REPORT_JSON_SIZE 0x800 +#define COREDUMP_FCD_SIZE 0x10E00000 +#define CDD_LOG_SIZE 0x30000 +#define EEPROM_BIN_SIZE 0x10000 +#define BOOTCORE_TRC_BIN_SIZE 0x8000 +#define BOOTCORE_PREV_TRC_BIN_SIZE 0x20000 + +/** + * enum iosm_devlink_param_id - Enum type to different devlink params + * @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID + * @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required + * @IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION: Set if fls file to be + * flashed is Loadmap/region file + * @IOSM_DEVLINK_PARAM_ID_ADDRESS: Address of the region to be + * flashed + * @IOSM_DEVLINK_PARAM_ID_REGION_COUNT: Max region count + */ + +enum iosm_devlink_param_id { + IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH, + IOSM_DEVLINK_PARAM_ID_DOWNLOAD_REGION, + IOSM_DEVLINK_PARAM_ID_ADDRESS, + IOSM_DEVLINK_PARAM_ID_REGION_COUNT, +}; + +/** + * enum iosm_rpsi_cmd_code - Enum type for RPSI command list + * @rpsi_cmd_code_ebl: Command to load ebl + * @rpsi_cmd_coredump_start: Command to get list of files and + * file size info from PSI + * @rpsi_cmd_coredump_get: Command to get the coredump data + * @rpsi_cmd_coredump_end: Command to stop receiving the coredump + */ +enum iosm_rpsi_cmd_code { + rpsi_cmd_code_ebl = 0x02, + rpsi_cmd_coredump_start = 0x10, + rpsi_cmd_coredump_get = 0x11, + rpsi_cmd_coredump_end = 0x12, +}; + +/** + * enum iosm_flash_comp_type - Enum for different flash component types + * @FLASH_COMP_TYPE_PSI: PSI flash comp type + * @FLASH_COMP_TYPE_EBL: EBL flash comp type + * @FLASH_COMP_TYPE_FLS: FLS flash comp type + * @FLASH_COMP_TYPE_INVAL: Invalid flash comp type + */ +enum iosm_flash_comp_type { + FLASH_COMP_TYPE_PSI, + FLASH_COMP_TYPE_EBL, + FLASH_COMP_TYPE_FLS, + FLASH_COMP_TYPE_INVAL, +}; + +/** + * struct iosm_devlink_sio - SIO instance + * @rx_list: Downlink skbuf list received from CP + * @read_sem: Needed for the blocking read or downlink transfer + * @channel_id: Reserved channel id for flashing/CD collection to RAM + * @channel: Channel instance for 
flashing and coredump + * @devlink_read_pend: Check if read is pending + */ +struct iosm_devlink_sio { + struct sk_buff_head rx_list; + struct completion read_sem; + int channel_id; + struct ipc_mem_channel *channel; + u32 devlink_read_pend; +}; + +/** + * struct iosm_flash_params - List of flash params required for flashing + * @address: Address of the region file to be flashed + * @region_count: Maximum no of regions for each fls file + * @download_region: To be set if region is being flashed + * @erase_full_flash: To set the flashing mode + * erase_full_flash = 1; full erase + * erase_full_flash = 0; no erase + * @erase_full_flash_done: Flag to check if it is a full erase + */ +struct iosm_flash_params { + u32 address; + u8 region_count; + u8 download_region; + u8 erase_full_flash; + u8 erase_full_flash_done; +}; + +/** + * struct iosm_ebl_ctx_data - EBL ctx data used during flashing + * @ebl_sw_info_version: SWID version info obtained from EBL + * @m_ebl_resp: Buffer used to read and write the ebl data + */ +struct iosm_ebl_ctx_data { + u8 ebl_sw_info_version; + u8 m_ebl_resp[IOSM_EBL_RSP_SIZE]; +}; + +/** + * struct iosm_coredump_file_info - Coredump file info + * @filename: Name of coredump file + * @default_size: Default size of coredump file + * @actual_size: Actual size of coredump file + * @entry: Index of the coredump file + */ +struct iosm_coredump_file_info { + char filename[IOSM_MAX_FILENAME_LEN]; + u32 default_size; + u32 actual_size; + u32 entry; +}; + +/** + * struct iosm_devlink - IOSM Devlink structure + * @devlink_sio: SIO instance for read/write functionality + * @pcie: Pointer to PCIe component + * @dev: Pointer to device struct + * @devlink_ctx: Pointer to devlink context + * @param: Params required for flashing + * @ebl_ctx: Data to be read and written to Modem + * @cd_file_info: coredump file info + * @iosm_devlink_mdm_coredump: region ops for coredump collection + * @cd_regions: coredump regions + */ +struct iosm_devlink { + struct iosm_devlink_sio devlink_sio; + struct iosm_pcie *pcie; + struct device *dev; + struct devlink *devlink_ctx; + struct iosm_flash_params param; + struct iosm_ebl_ctx_data ebl_ctx; + struct iosm_coredump_file_info *cd_file_info; + struct devlink_region_ops iosm_devlink_mdm_coredump[IOSM_NOF_CD_REGION]; + struct devlink_region *cd_regions[IOSM_NOF_CD_REGION]; +}; + +/** + * union iosm_rpsi_param_u - RPSI cmd param for CRC calculation + * @word: Words member used in CRC calculation + * @dword: Actual data + */ +union iosm_rpsi_param_u { + __le16 word[2]; + __le32 dword; +}; + +/** + * struct iosm_rpsi_cmd - Structure for RPSI Command + * @param: Used to calculate CRC + * @cmd: Stores the RPSI command + * @crc: Stores the CRC value + */ +struct iosm_rpsi_cmd { + union iosm_rpsi_param_u param; + __le16 cmd; + __le16 crc; +}; + +struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem); + +void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink); + +int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry); + +#endif /* _IOSM_IPC_DEVLINK_H */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c new file mode 100644 index 000000000000..ebceedf7c9f5 --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2021 Intel Corporation. 
+ */ + +#include "iosm_ipc_coredump.h" +#include "iosm_ipc_devlink.h" +#include "iosm_ipc_flash.h" + +/* This function will pack the data to be sent to the modem using the + * payload, payload length and pack id + */ +static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req, + u32 pack_length, u16 pack_id, + u8 *payload, u32 payload_length) +{ + u16 checksum = pack_id; + u32 i; + + if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length) + return -EINVAL; + + flash_req->pack_id = cpu_to_le16(pack_id); + flash_req->msg_length = cpu_to_le32(payload_length); + checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) + + (payload_length & IOSM_EBL_CKSM); + + for (i = 0; i < payload_length; i++) + checksum += payload[i]; + + flash_req->checksum = cpu_to_le16(checksum); + + return 0; +} + +/* validate the response received from modem and + * check the type of errors received + */ +static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp) +{ + struct iosm_ebl_error *err_info = payload_rsp; + u16 *rsp_code = hdr_rsp; + u32 i; + + if (*rsp_code == IOSM_EBL_RSP_BUFF) { + for (i = 0; i < IOSM_MAX_ERRORS; i++) { + if (!err_info->error[i].error_code) { + pr_err("EBL: error_class = %d, error_code = %d", + err_info->error[i].error_class, + err_info->error[i].error_code); + } + } + return -EINVAL; + } + + return 0; +} + +/* Send data to the modem */ +static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size, + u16 pack_id, u8 *payload, u32 payload_length) +{ + struct iosm_flash_data flash_req; + int ret; + + ret = ipc_flash_proc_format_ebl_pack(&flash_req, size, + pack_id, payload, payload_length); + if (ret) { + dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d", + pack_id); + goto ipc_free_payload; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req, + IOSM_EBL_HEAD_SIZE); + if (ret) { + dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x", + pack_id); + goto ipc_free_payload; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length); + if (ret) { + dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x", + pack_id); + } + +ipc_free_payload: + return ret; +} + +/** + * ipc_flash_link_establish - Flash link establishment + * @ipc_imem: Pointer to struct iosm_imem + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_link_establish(struct iosm_imem *ipc_imem) +{ + u8 ler_data[IOSM_LER_RSP_SIZE]; + u32 bytes_read; + + /* Allocate channel for flashing/cd collection */ + ipc_imem->ipc_devlink->devlink_sio.channel = + ipc_imem_sys_devlink_open(ipc_imem); + + if (!ipc_imem->ipc_devlink->devlink_sio.channel) + goto chl_open_fail; + + if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data, + IOSM_LER_RSP_SIZE, &bytes_read)) + goto devlink_read_fail; + + if (bytes_read != IOSM_LER_RSP_SIZE) + goto devlink_read_fail; + + return 0; + +devlink_read_fail: + ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink); +chl_open_fail: + return -EIO; +} + +/* Receive data from the modem */ +static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size, + u8 *mdm_rsp) +{ + u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE]; + u32 bytes_read; + int ret; + + ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr, + IOSM_EBL_HEAD_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed", + IOSM_EBL_HEAD_SIZE); + goto ipc_flash_recv_err; + } + + if (bytes_read != IOSM_EBL_HEAD_SIZE) { + ret = -EINVAL; + goto ipc_flash_recv_err; + } + + ret = 
ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed", + size); + goto ipc_flash_recv_err; + } + + if (bytes_read != size) { + ret = -EINVAL; + goto ipc_flash_recv_err; + } + + ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp); + +ipc_flash_recv_err: + return ret; +} + +/* Function to send command to modem and receive response */ +static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id, + u8 *payload, u32 payload_length, u8 *mdm_rsp) +{ + size_t frame_len = IOSM_EBL_DW_PACK_SIZE; + int ret; + + if (pack_id == FLASH_SET_PROT_CONF) + frame_len = IOSM_EBL_W_PACK_SIZE; + + ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload, + payload_length); + if (ret) + goto ipc_flash_send_rcv; + + ret = ipc_flash_receive_data(ipc_devlink, + frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp); + +ipc_flash_send_rcv: + return ret; +} + +/** + * ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash + * @ipc_devlink: Pointer to devlink structure + * @mdm_rsp: Pointer to modem response buffer + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink, + u8 *mdm_rsp) +{ + ipc_devlink->ebl_ctx.ebl_sw_info_version = + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER]; + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED; + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED; + + if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] & + IOSM_CAP_USE_EXT_CAP) { + if (ipc_devlink->param.erase_full_flash) + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &= + ~((u8)IOSM_EXT_CAP_ERASE_ALL); + else + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &= + ~((u8)IOSM_EXT_CAP_COMMIT_ALL); + ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] = + IOSM_CAP_USE_EXT_CAP; + } + + /* Write back the EBL capability to modem + * Request Set Protcnf command + */ + return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF, + ipc_devlink->ebl_ctx.m_ebl_resp, + IOSM_EBL_RSP_SIZE, mdm_rsp); +} + +/* Read the SWID type and SWID value from the EBL */ +int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + struct iosm_flash_msg_control cmd_msg; + struct iosm_swid_table *swid; + char ebl_swid[IOSM_SWID_STR]; + int ret; + + if (ipc_devlink->ebl_ctx.ebl_sw_info_version != + IOSM_EXT_CAP_SWID_OOS_PACK) + return -EINVAL; + + cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ); + cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE); + cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG); + cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL, + (u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp); + if (ret) + goto ipc_swid_err; + + cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp)); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ, + (u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp); + if (ret) + goto ipc_swid_err; + + swid = (struct iosm_swid_table *)mdm_rsp; + dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val, + swid->rf_engine_id_val); + + snprintf(ebl_swid, sizeof(ebl_swid), "SWID: %x, RF_ENGINE_ID: %x", + swid->sw_id_val, swid->rf_engine_id_val); + + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid, + NULL, 0, 0); +ipc_swid_err: + return ret; +} + +/* Function to check if full erase or conditional erase was successful */ +static int ipc_flash_erase_check(struct 
iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + int ret, count = 0; + u16 mdm_rsp_data; + + /* Request Flash Erase Check */ + do { + mdm_rsp_data = IOSM_MDM_SEND_DATA; + ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK, + (u8 *)&mdm_rsp_data, + IOSM_MDM_SEND_2, mdm_rsp); + if (ret) + goto ipc_erase_chk_err; + + mdm_rsp_data = *((u16 *)mdm_rsp); + if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) { + dev_err(ipc_devlink->dev, + "Flash Erase Check resp wrong 0x%04X", + mdm_rsp_data); + ret = -EINVAL; + goto ipc_erase_chk_err; + } + count++; + msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL); + } while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) && + (count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT / + IOSM_FLASH_ERASE_CHECK_INTERVAL))); + + if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) { + dev_err(ipc_devlink->dev, "Modem erase check timeout failure!"); + ret = -ETIMEDOUT; + } + +ipc_erase_chk_err: + return ret; +} + +/* Full erase function which will erase the nand flash through EBL command */ +static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp) +{ + u32 erase_address = IOSM_ERASE_START_ADDR; + struct iosm_flash_msg_control cmd_msg; + u32 erase_length = IOSM_ERASE_LEN; + int ret; + + dev_dbg(ipc_devlink->dev, "Erase full nand flash"); + cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE); + cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH); + cmd_msg.length = cpu_to_le32(erase_length); + cmd_msg.arguments = cpu_to_le32(erase_address); + + ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL, + (unsigned char *)&cmd_msg, + IOSM_MDM_SEND_16, mdm_rsp); + if (ret) + goto ipc_flash_erase_err; + + ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG; + ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp); + +ipc_flash_erase_err: + return ret; +} + +/* Logic for flashing all the Loadmaps available for individual fls file */ +static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp) +{ + __le32 reg_info[2]; /* 0th position region address, 1st position size */ + char *file_ptr; + u32 rest_len; + u32 raw_len; + int ret; + + file_ptr = (char *)fw->data; + reg_info[0] = cpu_to_le32(ipc_devlink->param.address); + + if (!ipc_devlink->param.erase_full_flash_done) { + reg_info[1] = cpu_to_le32(ipc_devlink->param.address + + fw->size - 2); + ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START, + (u8 *)reg_info, IOSM_MDM_SEND_8, + mdm_rsp); + if (ret) + goto dl_region_fail; + + ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp); + if (ret) + goto dl_region_fail; + } + + /* Request Flash Set Address */ + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS, + (u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp); + if (ret) + goto dl_region_fail; + + rest_len = fw->size; + + /* Request Flash Write Raw Image */ + ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE, + FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len, + IOSM_MDM_SEND_4); + if (ret) + goto dl_region_fail; + + do { + raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? 
IOSM_FLS_BUF_SIZE : + rest_len; + ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr, + raw_len); + if (ret) { + dev_err(ipc_devlink->dev, "Image write failed"); + goto dl_region_fail; + } + file_ptr += raw_len; + rest_len -= raw_len; + } while (rest_len); + + ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE, + mdm_rsp); + +dl_region_fail: + return ret; +} + +/** + * ipc_flash_send_fls - Inject Modem subsystem fls file to device + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * @mdm_rsp: Pointer to modem response buffer + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp) +{ + u16 flash_cmd; + int ret; + + if (ipc_devlink->param.erase_full_flash) { + ipc_devlink->param.erase_full_flash = false; + ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp); + if (ret) + goto ipc_flash_err; + } + + /* Request Sec Start */ + if (!ipc_devlink->param.download_region) { + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START, + (u8 *)fw->data, fw->size, mdm_rsp); + if (ret) + goto ipc_flash_err; + } else { + /* Download regions */ + ipc_devlink->param.region_count -= IOSM_SET_FLAG; + ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp); + if (ret) + goto ipc_flash_err; + + if (!ipc_devlink->param.region_count) { + /* Request Sec End */ + flash_cmd = IOSM_MDM_SEND_DATA; + ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END, + (u8 *)&flash_cmd, + IOSM_MDM_SEND_2, mdm_rsp); + } + } + +ipc_flash_err: + return ret; +} + +/** + * ipc_flash_boot_psi - Inject PSI image + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink, + const struct firmware *fw) +{ + u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2]; + u32 bytes_read; + u8 *psi_code; + int ret; + + dev_dbg(ipc_devlink->dev, "Boot transfer PSI"); + psi_code = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!psi_code) + return -ENOMEM; + + ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, fw->size); + if (ret) { + dev_err(ipc_devlink->dev, "RPSI Image write failed"); + goto ipc_flash_psi_free; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, + IOSM_LER_ACK_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed"); + goto ipc_flash_psi_free; + } + + if (bytes_read != IOSM_LER_ACK_SIZE) { + ret = -EINVAL; + goto ipc_flash_psi_free; + } + + snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0], + read_data[1]); + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, + psi_ack_byte, "PSI ACK", 0, 0); + + if (read_data[0] == 0x00 && read_data[1] == 0xCD) { + dev_dbg(ipc_devlink->dev, "Coredump detected"); + ret = ipc_coredump_get_list(ipc_devlink, + rpsi_cmd_coredump_start); + if (ret) + dev_err(ipc_devlink->dev, "Failed to get cd list"); + } + +ipc_flash_psi_free: + kfree(psi_code); + return ret; +} + +/** + * ipc_flash_boot_ebl - Inject EBL image + * @ipc_devlink: Pointer to devlink structure + * @fw: FW image + * + * Returns: 0 on success and failure value on error + */ +int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink, + const struct firmware *fw) +{ + u32 ebl_size = fw->size; + u8 read_data[2]; + u32 bytes_read; + int ret; + + if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) != + IPC_MEM_EXEC_STAGE_PSI) { + devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, + "Invalid execution 
stage", + NULL, 0, 0); + return -EINVAL; + } + + dev_dbg(ipc_devlink->dev, "Boot transfer EBL"); + ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl, + IOSM_RPSI_LOAD_SIZE); + if (ret) { + dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size, + sizeof(ebl_size)); + if (ret) { + dev_err(ipc_devlink->dev, "EBL length write failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_write(ipc_devlink, (unsigned char *)fw->data, + fw->size); + if (ret) { + dev_err(ipc_devlink->dev, "EBL data transfer failed"); + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE, + &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_READ_SIZE) { + ret = -EINVAL; + goto ipc_flash_ebl_err; + } + + ret = ipc_imem_sys_devlink_read(ipc_devlink, + ipc_devlink->ebl_ctx.m_ebl_resp, + IOSM_EBL_RSP_SIZE, &bytes_read); + if (ret) { + dev_err(ipc_devlink->dev, "EBL response read failed"); + goto ipc_flash_ebl_err; + } + + if (bytes_read != IOSM_EBL_RSP_SIZE) + ret = -EINVAL; + +ipc_flash_ebl_err: + return ret; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.h b/drivers/net/wwan/iosm/iosm_ipc_flash.h new file mode 100644 index 000000000000..132d59d60fbe --- /dev/null +++ b/drivers/net/wwan/iosm/iosm_ipc_flash.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (C) 2020-2021 Intel Corporation. 
+ */ + +#ifndef _IOSM_IPC_FLASH_H +#define _IOSM_IPC_FLASH_H + +/* Buffer size used to read the fls image */ +#define IOSM_FLS_BUF_SIZE 0x00100000 +/* Full erase start address */ +#define IOSM_ERASE_START_ADDR 0x00000000 +/* Erase length for NAND flash */ +#define IOSM_ERASE_LEN 0xFFFFFFFF +/* EBL response Header size */ +#define IOSM_EBL_HEAD_SIZE 8 +/* EBL payload size */ +#define IOSM_EBL_W_PAYL_SIZE 2048 +/* Total EBL pack size */ +#define IOSM_EBL_W_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_W_PAYL_SIZE) +/* EBL payload size */ +#define IOSM_EBL_DW_PAYL_SIZE 16384 +/* Total EBL pack size */ +#define IOSM_EBL_DW_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_DW_PAYL_SIZE) +/* EBL name size */ +#define IOSM_EBL_NAME 32 +/* Maximum supported error types */ +#define IOSM_MAX_ERRORS 8 +/* Read size for RPSI/EBL response */ +#define IOSM_READ_SIZE 2 +/* Link establishment response ack size */ +#define IOSM_LER_ACK_SIZE 2 +/* PSI ACK len */ +#define IOSM_PSI_ACK 8 +/* SWID capability for packed swid type */ +#define IOSM_EXT_CAP_SWID_OOS_PACK 0x02 +/* EBL error response buffer */ +#define IOSM_EBL_RSP_BUFF 0x0041 +/* SWID string length */ +#define IOSM_SWID_STR 64 +/* Load EBL command size */ +#define IOSM_RPSI_LOAD_SIZE 0 +/* EBL payload checksum */ +#define IOSM_EBL_CKSM 0x0000FFFF +/* SWID msg len and argument */ +#define IOSM_MSG_LEN_ARG 0 +/* Data to be sent to modem */ +#define IOSM_MDM_SEND_DATA 0x0000 +/* Data received from modem as part of erase check */ +#define IOSM_MDM_ERASE_RSP 0x0001 +/* Bit shift to calculate Checksum */ +#define IOSM_EBL_PAYL_SHIFT 16 +/* Flag To be set */ +#define IOSM_SET_FLAG 1 +/* Set flash erase check timeout to 100 msec */ +#define IOSM_FLASH_ERASE_CHECK_TIMEOUT 100 +/* Set flash erase check interval to 20 msec */ +#define IOSM_FLASH_ERASE_CHECK_INTERVAL 20 +/* Link establishment response ack size */ +#define IOSM_LER_RSP_SIZE 60 + +/** + * enum iosm_flash_package_type - Enum for the flashing operations + * @FLASH_SET_PROT_CONF: Write EBL capabilities + * @FLASH_SEC_START: Start writing the secpack + * @FLASH_SEC_END: Validate secpack end + * @FLASH_SET_ADDRESS: Set the address for flashing + * @FLASH_ERASE_START: Start erase before flashing + * @FLASH_ERASE_CHECK: Validate the erase functionality + * @FLASH_OOS_CONTROL: Retrieve data based on oos actions + * @FLASH_OOS_DATA_READ: Read data from EBL + * @FLASH_WRITE_IMAGE_RAW: Write the raw image to flash + */ +enum iosm_flash_package_type { + FLASH_SET_PROT_CONF = 0x0086, + FLASH_SEC_START = 0x0204, + FLASH_SEC_END, + FLASH_SET_ADDRESS = 0x0802, + FLASH_ERASE_START = 0x0805, + FLASH_ERASE_CHECK, + FLASH_OOS_CONTROL = 0x080C, + FLASH_OOS_DATA_READ = 0x080E, + FLASH_WRITE_IMAGE_RAW, +}; + +/** + * enum iosm_out_of_session_action - Actions possible over the + * OutOfSession command interface + * @FLASH_OOSC_ACTION_READ: Read data according to its type + * @FLASH_OOSC_ACTION_ERASE: Erase data according to its type + */ +enum iosm_out_of_session_action { + FLASH_OOSC_ACTION_READ = 2, + FLASH_OOSC_ACTION_ERASE = 3, +}; + +/** + * enum iosm_out_of_session_type - Data types that can be handled over the + * Out Of Session command Interface + * @FLASH_OOSC_TYPE_ALL_FLASH: The whole flash area + * @FLASH_OOSC_TYPE_SWID_TABLE: Read the swid table from the target + */ +enum iosm_out_of_session_type { + FLASH_OOSC_TYPE_ALL_FLASH = 8, + FLASH_OOSC_TYPE_SWID_TABLE = 16, +}; + +/** + * enum iosm_ebl_caps - EBL capability settings + * @IOSM_CAP_NOT_ENHANCED: If capability not supported + * @IOSM_CAP_USE_EXT_CAP: To be set if 
extended capability is set + * @IOSM_EXT_CAP_ERASE_ALL: Set erase all capability + * @IOSM_EXT_CAP_COMMIT_ALL: Set the commit all capability + */ +enum iosm_ebl_caps { + IOSM_CAP_NOT_ENHANCED = 0x00, + IOSM_CAP_USE_EXT_CAP = 0x01, + IOSM_EXT_CAP_ERASE_ALL = 0x08, + IOSM_EXT_CAP_COMMIT_ALL = 0x20, +}; + +/** + * enum iosm_ebl_rsp - EBL response field + * @EBL_CAPS_FLAG: EBL capability flag + * @EBL_SKIP_ERASE: EBL skip erase flag + * @EBL_SKIP_CRC: EBL skip wr_pack crc + * @EBL_EXT_CAPS_HANDLED: EBL extended capability handled flag + * @EBL_OOS_CONFIG: EBL oos configuration + * @EBL_RSP_SW_INFO_VER: EBL SW info version + */ +enum iosm_ebl_rsp { + EBL_CAPS_FLAG = 50, + EBL_SKIP_ERASE = 54, + EBL_SKIP_CRC = 55, + EBL_EXT_CAPS_HANDLED = 57, + EBL_OOS_CONFIG = 64, + EBL_RSP_SW_INFO_VER = 70, +}; + +/** + * enum iosm_mdm_send_recv_data - Data to send to modem + * @IOSM_MDM_SEND_2: Send 2 bytes of payload + * @IOSM_MDM_SEND_4: Send 4 bytes of payload + * @IOSM_MDM_SEND_8: Send 8 bytes of payload + * @IOSM_MDM_SEND_16: Send 16 bytes of payload + */ +enum iosm_mdm_send_recv_data { + IOSM_MDM_SEND_2 = 2, + IOSM_MDM_SEND_4 = 4, + IOSM_MDM_SEND_8 = 8, + IOSM_MDM_SEND_16 = 16, +}; + +/** + * struct iosm_ebl_one_error - Structure containing error details + * @error_class: Error type: standard, security, or text error + * @error_code: Specific error from error type + */ +struct iosm_ebl_one_error { + u16 error_class; + u16 error_code; +}; + +/** + * struct iosm_ebl_error - Structure with max error types supported + * @error: Array of one_error structures with max errors + */ +struct iosm_ebl_error { + struct iosm_ebl_one_error error[IOSM_MAX_ERRORS]; +}; + +/** + * struct iosm_swid_table - SWID table data for modem + * @number_of_data_sets: Number of swid types + * @sw_id_type: SWID type - SWID + * @sw_id_val: SWID value + * @rf_engine_id_type: RF engine ID type - RF_ENGINE_ID + * @rf_engine_id_val: RF engine ID value + */ +struct iosm_swid_table { + u32 number_of_data_sets; + char sw_id_type[IOSM_EBL_NAME]; + u32 sw_id_val; + char rf_engine_id_type[IOSM_EBL_NAME]; + u32 rf_engine_id_val; +}; + +/** + * struct iosm_flash_msg_control - Data sent to modem + * @action: Action to be performed + * @type: Type of action + * @length: Length of the action + * @arguments: Argument value sent to modem + */ +struct iosm_flash_msg_control { + __le32 action; + __le32 type; + __le32 length; + __le32 arguments; +}; + +/** + * struct iosm_flash_data - Header data to be sent to modem + * @checksum: Checksum value calculated for the payload data + * @pack_id: Flash action type + * @msg_length: Payload length + */ +struct iosm_flash_data { + __le16 checksum; + __le16 pack_id; + __le32 msg_length; +}; + +int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink, + const struct firmware *fw); + +int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink, + const struct firmware *fw); + +int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink, + u8 *mdm_rsp); + +int ipc_flash_link_establish(struct iosm_imem *ipc_imem); + +int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp); + +int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink, + const struct firmware *fw, u8 *mdm_rsp); +#endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c index 9f00e36b7f79..cff3b43ca4d7 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c @@ -6,6 +6,8 @@ #include <linux/delay.h> #include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_devlink.h"
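/* Illustrative sketch, not part of this patch: one plausible way the
 * IOSM_EBL_CKSM mask and IOSM_EBL_PAYL_SHIFT constants defined in
 * iosm_ipc_flash.h above could be combined to checksum an EBL write-pack
 * payload before it is placed in struct iosm_flash_data.  The helper name
 * and the 16-bit fold scheme are assumptions; the driver's actual
 * computation lives in iosm_ipc_flash.c.
 */
static u16 iosm_ebl_cksm_sketch(const u8 *payload, u32 len)
{
	u32 sum = 0;
	u32 i;

	/* Accumulate the payload as little-endian 16-bit words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += payload[i] | (payload[i + 1] << 8);
	if (len & 1)
		sum += payload[len - 1];

	/* Fold carries above bit 15 back in, then mask to 16 bits. */
	while (sum >> IOSM_EBL_PAYL_SHIFT)
		sum = (sum & IOSM_EBL_CKSM) + (sum >> IOSM_EBL_PAYL_SHIFT);

	return (u16)sum;
}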
+#include "iosm_ipc_flash.h" #include "iosm_ipc_imem.h" #include "iosm_ipc_port.h" @@ -263,9 +265,12 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem, switch (pipe->channel->ctype) { case IPC_CTYPE_CTRL: port_id = pipe->channel->channel_id; - - /* Pass the packet to the wwan layer. */ - wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb); + if (port_id == IPC_MEM_CTRL_CHL_ID_7) + ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, + skb); + else + wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, + skb); break; case IPC_CTYPE_WWAN: @@ -399,19 +404,8 @@ static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem) { struct ipc_mem_channel *channel; - if (ipc_imem->flash_channel_id < 0) { - ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL; - dev_err(ipc_imem->dev, "Missing flash app:%d", - ipc_imem->flash_channel_id); - return; - } - + channel = ipc_imem->ipc_devlink->devlink_sio.channel; ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio); - - /* Wake up the flash app to continue or to terminate depending - * on the CP ROM exit code. - */ - channel = &ipc_imem->channels[ipc_imem->flash_channel_id]; complete(&channel->ul_sem); } @@ -482,8 +476,8 @@ static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer) container_of(hr_timer, struct iosm_imem, startup_timer); if (ktime_to_ns(ipc_imem->hrtimer_period)) { - hrtimer_forward(&ipc_imem->startup_timer, ktime_get(), - ipc_imem->hrtimer_period); + hrtimer_forward_now(&ipc_imem->startup_timer, + ipc_imem->hrtimer_period); result = HRTIMER_RESTART; } @@ -572,7 +566,7 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) enum ipc_phase old_phase, phase; bool retry_allocation = false; bool ul_pending = false; - int ch_id, i; + int i; if (irq != IMEM_IRQ_DONT_CARE) ipc_imem->ev_irq_pending[irq] = false; @@ -696,11 +690,8 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq) if ((phase == IPC_P_PSI || phase == IPC_P_EBL) && ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING && ipc_mmio_get_ipc_state(ipc_imem->mmio) == - IPC_MEM_DEVICE_IPC_RUNNING && - ipc_imem->flash_channel_id >= 0) { - /* Wake up the flash app to open the pipes. */ - ch_id = ipc_imem->flash_channel_id; - complete(&ipc_imem->channels[ch_id].ul_sem); + IPC_MEM_DEVICE_IPC_RUNNING) { + complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem); } /* Reset the expected CP state. */ @@ -1176,6 +1167,9 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem) ipc_port_deinit(ipc_imem->ipc_port); } + if (ipc_imem->ipc_devlink) + ipc_devlink_deinit(ipc_imem->ipc_devlink); + ipc_imem_device_ipc_uninit(ipc_imem); ipc_imem_channel_reset(ipc_imem); @@ -1258,6 +1252,7 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, void __iomem *mmio, struct device *dev) { struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL); + enum ipc_mem_exec_stage stage; if (!ipc_imem) return NULL; @@ -1272,9 +1267,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, ipc_imem->cp_version = 0; ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP; - /* Reset the flash channel id. 
*/ - ipc_imem->flash_channel_id = -1; - /* Reset the max number of configured channels */ ipc_imem->nr_of_channels = 0; @@ -1328,8 +1320,21 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id, goto imem_config_fail; } - return ipc_imem; + stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (stage == IPC_MEM_EXEC_STAGE_BOOT) { + /* Alloc and Register devlink */ + ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem); + if (!ipc_imem->ipc_devlink) { + dev_err(ipc_imem->dev, "Devlink register failed"); + goto imem_config_fail; + } + if (ipc_flash_link_establish(ipc_imem)) + goto devlink_channel_fail; + } + return ipc_imem; +devlink_channel_fail: + ipc_devlink_deinit(ipc_imem->ipc_devlink); imem_config_fail: hrtimer_cancel(&ipc_imem->td_alloc_timer); hrtimer_cancel(&ipc_imem->fast_update_timer); @@ -1361,3 +1366,51 @@ void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend) { ipc_imem->td_update_timer_suspended = suspend; } + +/* Verify the CP execution state, copy the chip info, + * change the execution phase to ROM + */ +static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem, + int arg, void *msg, + size_t msgsize) +{ + enum ipc_mem_exec_stage stage; + struct sk_buff *skb; + int rc = -EINVAL; + size_t size; + + /* Test the CP execution state. */ + stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (stage != IPC_MEM_EXEC_STAGE_BOOT) { + dev_err(ipc_imem->dev, + "Execution_stage: expected BOOT, received = %X", stage); + goto trigger_chip_info_fail; + } + /* Allocate a new sk buf for the chip info. */ + size = ipc_imem->mmio->chip_info_size; + if (size > IOSM_CHIP_INFO_SIZE_MAX) + goto trigger_chip_info_fail; + + skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size); + if (!skb) { + dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory"); + rc = -ENOMEM; + goto trigger_chip_info_fail; + } + /* Copy the chip info characters into the ipc_skb. */ + ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size); + /* First change to the ROM boot phase. */ + dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage); + ipc_imem->phase = ipc_imem_phase_update(ipc_imem); + ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb); + rc = 0; +trigger_chip_info_fail: + return rc; +} + +int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem) +{ + return ipc_task_queue_send_task(ipc_imem, + ipc_imem_devlink_trigger_chip_info_cb, + 0, NULL, 0, true); +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h index dc65b0712261..6be6708b4eec 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h @@ -69,7 +69,7 @@ struct ipc_chnl_cfg; #define IMEM_IRQ_DONT_CARE (-1) -#define IPC_MEM_MAX_CHANNELS 7 +#define IPC_MEM_MAX_CHANNELS 8 #define IPC_MEM_MUX_IP_SESSION_ENTRIES 8 @@ -98,6 +98,7 @@ struct ipc_chnl_cfg; #define IPC_MEM_DL_ETH_OFFSET 16 #define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb)) +#define IOSM_CHIP_INFO_SIZE_MAX 100 #define FULLY_FUNCTIONAL 0 @@ -304,9 +305,9 @@ enum ipc_phase { * @ipc_port: IPC PORT data structure pointer * @pcie: IPC PCIe * @dev: Pointer to device structure - * @flash_channel_id: Reserved channel id for flashing to RAM. * @ipc_requested_state: Expected IPC state on CP. * @channels: Channel list with UL/DL pipe pairs. 
+ * @ipc_devlink: IPC Devlink data structure pointer + * @ipc_status: local ipc_status + * @nr_of_channels: number of configured channels + * @startup_timer: startup timer for NAND support. @@ -349,9 +350,9 @@ struct iosm_imem { struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS]; struct iosm_pcie *pcie; struct device *dev; - int flash_channel_id; enum ipc_mem_device_ipc_state ipc_requested_state; struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS]; + struct iosm_devlink *ipc_devlink; u32 ipc_status; u32 nr_of_channels; struct hrtimer startup_timer; @@ -575,4 +576,15 @@ void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem); */ void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype, struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation); + +/** + * ipc_imem_devlink_trigger_chip_info - Inform devlink that the chip + * information is available if the + * flashing-to-RAM interworking is to + * be executed. + * @ipc_imem: Pointer to imem structure + * + * Return: 0 on success, -1 on failure + */ +int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem); #endif diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 0a472ce77370..b885a6570235 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -6,6 +6,7 @@ #include <linux/delay.h> #include "iosm_ipc_chnl_cfg.h" +#include "iosm_ipc_devlink.h" #include "iosm_ipc_imem.h" #include "iosm_ipc_imem_ops.h" #include "iosm_ipc_port.h" @@ -331,3 +332,319 @@ int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb) out: return ret; } + +/* Open a SIO link to CP and return the channel instance */ +struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem) +{ + struct ipc_mem_channel *channel; + enum ipc_phase phase; + int channel_id; + + phase = ipc_imem_phase_update(ipc_imem); + switch (phase) { + case IPC_P_OFF: + case IPC_P_ROM: + /* Get a channel id as flash id and reserve it. */ + channel_id = ipc_imem_channel_alloc(ipc_imem, + IPC_MEM_CTRL_CHL_ID_7, + IPC_CTYPE_CTRL); + + if (channel_id < 0) { + dev_err(ipc_imem->dev, + "reservation of a flash channel id failed"); + goto error; + } + + ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id; + channel = &ipc_imem->channels[channel_id]; + + /* Enqueue chip info data to be read */ + if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) { + dev_err(ipc_imem->dev, "Enqueue of chip info failed"); + channel->state = IMEM_CHANNEL_FREE; + goto error; + } + + return channel; + + case IPC_P_PSI: + case IPC_P_EBL: + ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio); + if (ipc_imem->cp_version == -1) { + dev_err(ipc_imem->dev, "invalid CP version"); + goto error; + } + + channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id; + return ipc_imem_channel_open(ipc_imem, channel_id, + IPC_HP_CDEV_OPEN); + + default: + /* CP is in the wrong state (e.g. CRASH or CD_READY) */ + dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase); + } +error: + return NULL; +} + +/* Release a SIO channel link to CP.
*/ +void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink) +{ + struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem; + int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT; + enum ipc_mem_exec_stage exec_stage; + struct ipc_mem_channel *channel; + enum ipc_phase curr_phase; + int status = 0; + u32 tail = 0; + + channel = ipc_imem->ipc_devlink->devlink_sio.channel; + curr_phase = ipc_imem->phase; + /* Wait up to boot_check_timeout for the modem to reach the RUN or + * PSI stage. + */ + do { + exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + if (exec_stage == IPC_MEM_EXEC_STAGE_RUN || + exec_stage == IPC_MEM_EXEC_STAGE_PSI) + break; + msleep(20); + boot_check_timeout -= 20; + } while (boot_check_timeout > 0); + + /* If there are any pending TDs then wait for Timeout/Completion before + * closing pipe. + */ + if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) { + status = wait_for_completion_interruptible_timeout + (&ipc_imem->ul_pend_sem, + msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT)); + if (status == 0) { + dev_dbg(ipc_imem->dev, + "Data Timeout on UL-Pipe:%d Head:%d Tail:%d", + channel->ul_pipe.pipe_nr, + channel->ul_pipe.old_head, + channel->ul_pipe.old_tail); + } + } + + ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, + &channel->dl_pipe, NULL, &tail); + + if (tail != channel->dl_pipe.old_tail) { + status = wait_for_completion_interruptible_timeout + (&ipc_imem->dl_pend_sem, + msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT)); + if (status == 0) { + dev_dbg(ipc_imem->dev, + "Data Timeout on DL-Pipe:%d Head:%d Tail:%d", + channel->dl_pipe.pipe_nr, + channel->dl_pipe.old_head, + channel->dl_pipe.old_tail); + } + } + + /* Due to the wait for completion in messages, there is a small window + * between closing the pipe and marking the channel as closed. In this + * small window there could be an HP update from the host driver. Hence + * update the channel state as CLOSING to avoid unnecessary interrupts + * towards CP. + */ + channel->state = IMEM_CHANNEL_CLOSING; + /* Release the pipe resources */ + ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe); + ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe); +} + +void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, + struct sk_buff *skb) +{ + skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb); + complete(&ipc_devlink->devlink_sio.read_sem); +} + +/* PSI transfer */ +static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem, + struct ipc_mem_channel *channel, + unsigned char *buf, int count) +{ + int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT; + enum ipc_mem_exec_stage exec_stage; + + dma_addr_t mapping = 0; + int ret; + + ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping, + DMA_TO_DEVICE); + if (ret) + goto pcie_addr_map_fail; + + /* Save the PSI information for the CP ROM driver on the doorbell + * scratchpad.
+ */ + ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count); + ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT); + + ret = wait_for_completion_interruptible_timeout + (&channel->ul_sem, + msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT)); + + if (ret <= 0) { + dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d", + ret); + goto psi_transfer_fail; + } + /* If the PSI download fails, return the CP boot ROM exit code */ + if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT && + ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) { + ret = (-1) * ((int)ipc_imem->rom_exit_code); + goto psi_transfer_fail; + } + + dev_dbg(ipc_imem->dev, "PSI image successfully downloaded"); + + /* Wait psi_start_timeout milliseconds until the CP PSI image is + * running and updates the execution_stage field with + * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage. + */ + do { + exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio); + + if (exec_stage == IPC_MEM_EXEC_STAGE_PSI) + break; + + msleep(20); + psi_start_timeout -= 20; + } while (psi_start_timeout > 0); + + if (exec_stage != IPC_MEM_EXEC_STAGE_PSI) + goto psi_transfer_fail; /* Unknown status of CP PSI process. */ + + ipc_imem->phase = IPC_P_PSI; + + /* Enter the PSI phase. */ + dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage); + + /* Request the RUNNING state from CP and wait until it is reached + * or the wait times out. + */ + ipc_imem_ipc_init_check(ipc_imem); + + ret = wait_for_completion_interruptible_timeout + (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT)); + if (ret <= 0) { + dev_err(ipc_imem->dev, + "Failed PSI RUNNING state on CP, Error-%d", ret); + goto psi_transfer_fail; + } + + if (ipc_mmio_get_ipc_state(ipc_imem->mmio) != + IPC_MEM_DEVICE_IPC_RUNNING) { + dev_err(ipc_imem->dev, + "ch[%d] %s: unexpected CP IPC state %d, not RUNNING", + channel->channel_id, + ipc_imem_phase_get_string(ipc_imem->phase), + ipc_mmio_get_ipc_state(ipc_imem->mmio)); + + goto psi_transfer_fail; + } + + /* Create the flash channel for the transfer of the images. */ + if (!ipc_imem_sys_devlink_open(ipc_imem)) { + dev_err(ipc_imem->dev, "can't open flash_channel"); + goto psi_transfer_fail; + } + + ret = 0; +psi_transfer_fail: + ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE); +pcie_addr_map_fail: + return ret; +} + +int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, + unsigned char *buf, int count) +{ + struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem; + struct ipc_mem_channel *channel; + struct sk_buff *skb; + dma_addr_t mapping; + int ret; + + channel = ipc_imem->ipc_devlink->devlink_sio.channel; + + /* In the ROM phase the PSI image is passed to CP directly via a + * specific shared memory area and the doorbell scratchpad. + */ + if (ipc_imem->phase == IPC_P_ROM) { + ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count); + /* If the PSI transfer fails then send the crash + * signature. + */ + if (ret > 0) + ipc_imem_msg_send_feature_set(ipc_imem, + IPC_MEM_INBAND_CRASH_SIG, + false); + goto out; + } + + /* Allocate skb memory for the uplink buffer. */ + skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping, + DMA_TO_DEVICE, 0); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + memcpy(skb_put(skb, count), buf, count); + + IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED; + + /* Add skb to the uplink skbuf accumulator. */ + skb_queue_tail(&channel->ul_list, skb); + + /* Inform the IPC tasklet to pass the uplink buffer to CP.
*/ + if (!ipc_imem_call_cdev_write(ipc_imem)) { + ret = wait_for_completion_interruptible(&channel->ul_sem); + + if (ret < 0) { + dev_err(ipc_imem->dev, + "ch[%d] no CP confirmation, status = %d", + channel->channel_id, ret); + ipc_pcie_kfree_skb(ipc_devlink->pcie, skb); + goto out; + } + } + ret = 0; +out: + return ret; +} + +int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data, + u32 bytes_to_read, u32 *bytes_read) +{ + struct sk_buff *skb = NULL; + int rc = 0; + + /* check if an skb is available in rx_list or wait for one */ + devlink->devlink_sio.devlink_read_pend = 1; + while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) { + if (!wait_for_completion_interruptible_timeout + (&devlink->devlink_sio.read_sem, + msecs_to_jiffies(IPC_READ_TIMEOUT))) { + dev_err(devlink->dev, "Read timed out"); + rc = -ETIMEDOUT; + goto devlink_read_fail; + } + } + devlink->devlink_sio.devlink_read_pend = 0; + if (bytes_to_read < skb->len) { + dev_err(devlink->dev, "Invalid size, expected len %d", skb->len); + rc = -EINVAL; + goto devlink_read_fail; + } + *bytes_read = skb->len; + memcpy(data, skb->data, skb->len); + +devlink_read_fail: + ipc_pcie_kfree_skb(devlink->pcie, skb); + return rc; +} diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index 2007fe23e9a5..f0c88ac5643c 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -9,7 +9,7 @@ #include "iosm_ipc_mux_codec.h" /* Maximum wait time for blocking read */ -#define IPC_READ_TIMEOUT 500 +#define IPC_READ_TIMEOUT 3000 /* The delay in ms for deferring the unregister */ #define SIO_UNREGISTER_DEFER_DELAY_MS 1 @@ -98,4 +98,51 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id, */ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, enum ipc_mux_protocol mux_type); + +/** + * ipc_imem_sys_devlink_open - Open a Flash/CD channel link to CP + * @ipc_imem: iosm_imem instance + * + * Return: channel instance on success, NULL for failure + */ +struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem); + +/** + * ipc_imem_sys_devlink_close - Release a Flash/CD channel link to CP + * @ipc_devlink: Pointer to ipc_devlink data-struct + * + */ +void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink); + +/** + * ipc_imem_sys_devlink_notify_rx - Receive downlink characters from CP; + * the downlink skbuf is added at the end of the + * downlink (rx) list + * @ipc_devlink: Pointer to ipc_devlink data-struct + * @skb: Pointer to sk buffer + */ +void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink, + struct sk_buff *skb); + +/** + * ipc_imem_sys_devlink_read - Copy the rx data and free the skbuf + * @ipc_devlink: Devlink instance + * @data: Buffer to read the data from modem + * @bytes_to_read: Size of destination buffer + * @bytes_read: Number of bytes read + * + * Return: 0 on success and failure value on error + */ +int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data, + u32 bytes_to_read, u32 *bytes_read); + +/** + * ipc_imem_sys_devlink_write - Route the uplink buffer to CP + * @ipc_devlink: Devlink instance + * @buf: Pointer to buffer + * @count: Number of data bytes to write + * Return: 0 on success and failure value on error + */ +int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink, + unsigned char *buf, int count); #endif
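For reference, the typical calling pattern for the read/write pair declared above is a write followed by a blocking read of the two-byte modem acknowledgement, as in the EBL download steps of iosm_ipc_flash.c earlier in this patch. The sketch below is illustrative only and not part of the patch; the helper name is hypothetical, and the -EINVAL policy for a short acknowledgement mirrors the bytes_read checks in ipc_flash_boot_ebl().

static int iosm_devlink_xfer_sketch(struct iosm_devlink *ipc_devlink,
				    unsigned char *buf, int count)
{
	u8 ack[IOSM_READ_SIZE];
	u32 bytes_read = 0;
	int ret;

	/* Queue the uplink buffer towards CP. */
	ret = ipc_imem_sys_devlink_write(ipc_devlink, buf, count);
	if (ret)
		return ret;

	/* Block for the modem response, up to IPC_READ_TIMEOUT (3000 ms). */
	ret = ipc_imem_sys_devlink_read(ipc_devlink, ack, IOSM_READ_SIZE,
					&bytes_read);
	if (ret)
		return ret;

	/* A short acknowledgement is treated as invalid, as in the EBL flow. */
	return bytes_read == IOSM_READ_SIZE ? 0 : -EINVAL;
}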