Diffstat (limited to 'drivers/net/bonding/bond_main.c')
-rw-r--r--	drivers/net/bonding/bond_main.c	| 324
1 file changed, 272 insertions, 52 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index aebeb46e6fa6..15eddca7b4b6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -88,6 +88,7 @@
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
 #include <net/tls.h>
 #endif
+#include <net/ip6_route.h>
 
 #include "bonding_priv.h"
 
@@ -2793,31 +2794,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
 	return ret;
 }
 
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
-			  __be32 src_ip, struct bond_vlan_tag *tags)
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+			     struct sk_buff *skb)
 {
-	struct sk_buff *skb;
-	struct bond_vlan_tag *outer_tag = tags;
-	struct net_device *slave_dev = slave->dev;
 	struct net_device *bond_dev = slave->bond->dev;
-
-	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
-		  arp_op, &dest_ip, &src_ip);
-
-	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
-			 NULL, slave_dev->dev_addr, NULL);
-
-	if (!skb) {
-		net_err_ratelimited("ARP packet allocation failed\n");
-		return;
-	}
+	struct net_device *slave_dev = slave->dev;
+	struct bond_vlan_tag *outer_tag = tags;
 
 	if (!tags || tags->vlan_proto == VLAN_N_VID)
-		goto xmit;
+		return true;
 
 	tags++;
 
@@ -2834,7 +2819,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
 					tags->vlan_id);
 		if (!skb) {
 			net_err_ratelimited("failed to insert inner VLAN tag\n");
-			return;
+			return false;
 		}
 
 		tags++;
@@ -2847,8 +2832,34 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
 				       outer_tag->vlan_id);
 	}
 
-xmit:
-	arp_xmit(skb);
+	return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+			  __be32 src_ip, struct bond_vlan_tag *tags)
+{
+	struct net_device *bond_dev = slave->bond->dev;
+	struct net_device *slave_dev = slave->dev;
+	struct sk_buff *skb;
+
+	slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+		  arp_op, &dest_ip, &src_ip);
+
+	skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+			 NULL, slave_dev->dev_addr, NULL);
+
+	if (!skb) {
+		net_err_ratelimited("ARP packet allocation failed\n");
+		return;
+	}
+
+	if (bond_handle_vlan(slave, tags, skb))
+		arp_xmit(skb);
+	return;
 }
 
 /* Validate the device path between the @start_dev and the @end_dev.
@@ -2965,30 +2976,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
 	slave->target_last_arp_rx[i] = jiffies;
 }
 
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
-		 struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+			struct slave *slave)
 {
 	struct arphdr *arp = (struct arphdr *)skb->data;
 	struct slave *curr_active_slave, *curr_arp_slave;
 	unsigned char *arp_ptr;
 	__be32 sip, tip;
-	int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
 	unsigned int alen;
 
-	if (!slave_do_arp_validate(bond, slave)) {
-		if ((slave_do_arp_validate_only(bond) && is_arp) ||
-		    !slave_do_arp_validate_only(bond))
-			slave->last_rx = jiffies;
-		return RX_HANDLER_ANOTHER;
-	} else if (!is_arp) {
-		return RX_HANDLER_ANOTHER;
-	}
-
 	alen = arp_hdr_len(bond->dev);
 
-	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
-		  __func__, skb->dev->name);
-
 	if (alen > skb_headlen(skb)) {
 		arp = kmalloc(alen, GFP_ATOMIC);
 		if (!arp)
@@ -3059,6 +3057,216 @@ out_unlock:
 	return RX_HANDLER_ANOTHER;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+			 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+	struct net_device *bond_dev = slave->bond->dev;
+	struct net_device *slave_dev = slave->dev;
+	struct in6_addr mcaddr;
+	struct sk_buff *skb;
+
+	slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+		  daddr, saddr);
+
+	skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+	if (!skb) {
+		net_err_ratelimited("NS packet allocation failed\n");
+		return;
+	}
+
+	addrconf_addr_solict_mult(daddr, &mcaddr);
+	if (bond_handle_vlan(slave, tags, skb))
+		ndisc_send_skb(skb, &mcaddr, saddr);
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+	struct in6_addr *targets = bond->params.ns_targets;
+	struct bond_vlan_tag *tags;
+	struct dst_entry *dst;
+	struct in6_addr saddr;
+	struct flowi6 fl6;
+	int i;
+
+	for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+		slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+			  __func__, &targets[i]);
+		tags = NULL;
+
+		/* Find out through which dev should the packet go */
+		memset(&fl6, 0, sizeof(struct flowi6));
+		fl6.daddr = targets[i];
+		fl6.flowi6_oif = bond->dev->ifindex;
+
+		dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+		if (dst->error) {
+			dst_release(dst);
+			/* there's no route to target - try to send arp
+			 * probe to generate any traffic (arp_validate=0)
+			 */
+			if (bond->params.arp_validate)
+				pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+					     bond->dev->name,
+					     &targets[i]);
+			bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+			continue;
+		}
+
+		/* bond device itself */
+		if (dst->dev == bond->dev)
+			goto found;
+
+		rcu_read_lock();
+		tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+		rcu_read_unlock();
+
+		if (!IS_ERR_OR_NULL(tags))
+			goto found;
+
+		/* Not our device - skip */
+		slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+			  &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+		dst_release(dst);
+		continue;
+
+found:
+		if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+			bond_ns_send(slave, &targets[i], &saddr, tags);
+		dst_release(dst);
+		kfree(tags);
+	}
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+			      struct netdev_nested_priv *priv)
+{
+	struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+	return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+	struct netdev_nested_priv priv = {
+		.data = addr,
+	};
+	int ret = false;
+
+	if (bond_confirm_addr6(bond->dev, &priv))
+		return true;
+
+	rcu_read_lock();
+	if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+		ret = true;
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+			     struct in6_addr *saddr, struct in6_addr *daddr)
+{
+	int i;
+
+	if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+			  __func__, saddr, daddr);
+		return;
+	}
+
+	i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+	if (i == -1) {
+		slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+			  __func__, saddr);
+		return;
+	}
+	slave->last_rx = jiffies;
+	slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+		       struct slave *slave)
+{
+	struct slave *curr_active_slave, *curr_arp_slave;
+	struct icmp6hdr *hdr = icmp6_hdr(skb);
+	struct in6_addr *saddr, *daddr;
+
+	if (skb->pkt_type == PACKET_OTHERHOST ||
+	    skb->pkt_type == PACKET_LOOPBACK ||
+	    hdr->icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)
+		goto out;
+
+	saddr = &ipv6_hdr(skb)->saddr;
+	daddr = &ipv6_hdr(skb)->daddr;
+
+	slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+		  __func__, slave->dev->name, bond_slave_state(slave),
+		  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+		  saddr, daddr);
+
+	curr_active_slave = rcu_dereference(bond->curr_active_slave);
+	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+	/* We 'trust' the received ARP enough to validate it if:
+	 * see bond_arp_rcv().
+	 */
+	if (bond_is_active_slave(slave))
+		bond_validate_ns(bond, slave, saddr, daddr);
+	else if (curr_active_slave &&
+		 time_after(slave_last_rx(bond, curr_active_slave),
+			    curr_active_slave->last_link_up))
+		bond_validate_ns(bond, slave, saddr, daddr);
+	else if (curr_arp_slave &&
+		 bond_time_in_interval(bond,
+				       dev_trans_start(curr_arp_slave->dev), 1))
+		bond_validate_ns(bond, slave, saddr, daddr);
+
+out:
+	return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+		      struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+	bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+	slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+		  __func__, skb->dev->name);
+
+	/* Use arp validate logic for both ARP and NS */
+	if (!slave_do_arp_validate(bond, slave)) {
+		if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+		    (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+		    !slave_do_arp_validate_only(bond))
+			slave->last_rx = jiffies;
+		return RX_HANDLER_ANOTHER;
+	} else if (is_arp) {
+		return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+	} else if (is_ipv6) {
+		return bond_na_rcv(skb, bond, slave);
+#endif
+	} else {
+		return RX_HANDLER_ANOTHER;
+	}
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+	bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+	bond_ns_send_all(bond, slave);
+#endif
+}
+
 /* function to verify if we're in the arp_interval timeslice, returns true if
  * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
  * arp_interval/2) . the arp_interval/2 is needed for really fast networks.
@@ -3154,7 +3362,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
 		 * to be unstable during low/no traffic periods
 		 */
		if (bond_slave_is_up(slave))
-			bond_arp_send_all(bond, slave);
+			bond_send_validate(bond, slave);
 	}
 
 	rcu_read_unlock();
@@ -3368,7 +3576,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 			    curr_active_slave->dev->name);
 
 	if (curr_active_slave) {
-		bond_arp_send_all(bond, curr_active_slave);
+		bond_send_validate(bond, curr_active_slave);
 		return should_notify_rtnl;
 	}
 
@@ -3420,7 +3628,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 	bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
 				  BOND_SLAVE_NOTIFY_LATER);
 	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
-	bond_arp_send_all(bond, new_slave);
+	bond_send_validate(bond, new_slave);
 	new_slave->last_link_up = jiffies;
 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
 
@@ -3956,7 +4164,7 @@ static int bond_open(struct net_device *bond_dev)
 
 	if (bond->params.arp_interval) {  /* arp interval, in milliseconds. */
 		queue_delayed_work(bond->wq, &bond->arp_work, 0);
-		bond->recv_probe = bond_arp_rcv;
+		bond->recv_probe = bond_rcv_validate;
 	}
 
 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -4912,7 +5120,7 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
 	if (xmit_suc)
 		return NETDEV_TX_OK;
 
-	atomic_long_inc(&bond_dev->tx_dropped);
+	dev_core_stats_tx_dropped_inc(bond_dev);
 	return NET_XMIT_DROP;
 }
 
@@ -5937,6 +6145,7 @@ static int bond_check_params(struct bond_params *params)
 		strscpy_pad(params->primary, primary, sizeof(params->primary));
 
 	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
 
 	return 0;
 }
@@ -6047,27 +6256,38 @@ static int __net_init bond_net_init(struct net *net)
 	return 0;
 }
 
-static void __net_exit bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
 {
-	struct bond_net *bn = net_generic(net, bond_net_id);
-	struct bonding *bond, *tmp_bond;
+	struct bond_net *bn;
+	struct net *net;
 	LIST_HEAD(list);
 
-	bond_destroy_sysfs(bn);
+	list_for_each_entry(net, net_list, exit_list) {
+		bn = net_generic(net, bond_net_id);
+		bond_destroy_sysfs(bn);
+	}
 
 	/* Kill off any bonds created after unregistering bond rtnl ops */
 	rtnl_lock();
-	list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
-		unregister_netdevice_queue(bond->dev, &list);
+	list_for_each_entry(net, net_list, exit_list) {
+		struct bonding *bond, *tmp_bond;
+
+		bn = net_generic(net, bond_net_id);
+		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+			unregister_netdevice_queue(bond->dev, &list);
+	}
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
 
-	bond_destroy_proc_dir(bn);
+	list_for_each_entry(net, net_list, exit_list) {
+		bn = net_generic(net, bond_net_id);
+		bond_destroy_proc_dir(bn);
+	}
 }
 
 static struct pernet_operations bond_net_ops = {
 	.init = bond_net_init,
-	.exit = bond_net_exit,
+	.exit_batch = bond_net_exit_batch,
 	.id = &bond_net_id,
 	.size = sizeof(struct bond_net),
 };
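
For context on the bond_ns_send() hunk above: the NS destination handed to ndisc_send_skb() is the solicited-node multicast address that addrconf_addr_solict_mult() derives from the target, i.e. ff02::1:ff00:0/104 with the target's low 24 bits appended (RFC 4291). The following is a minimal, self-contained user-space sketch of that derivation only; it is illustrative, not part of the patch, and the helper name solicited_node_mcast() is invented here.

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

/* Derive the solicited-node multicast address (RFC 4291, 2.7.1) for an
 * IPv6 target: ff02::1:ff00:0/104 plus the target's low 24 bits. This
 * mirrors what addrconf_addr_solict_mult() computes in the kernel.
 */
static void solicited_node_mcast(const struct in6_addr *target,
				 struct in6_addr *mcast)
{
	memset(mcast, 0, sizeof(*mcast));
	mcast->s6_addr[0]  = 0xff;
	mcast->s6_addr[1]  = 0x02;
	mcast->s6_addr[11] = 0x01;
	mcast->s6_addr[12] = 0xff;
	mcast->s6_addr[13] = target->s6_addr[13];
	mcast->s6_addr[14] = target->s6_addr[14];
	mcast->s6_addr[15] = target->s6_addr[15];
}

int main(void)
{
	struct in6_addr target, mcast;
	char buf[INET6_ADDRSTRLEN];

	/* Example target address; any unicast ns_ip6_target-style address works. */
	inet_pton(AF_INET6, "2001:db8::1234:5678", &target);
	solicited_node_mcast(&target, &mcast);
	inet_ntop(AF_INET6, &mcast, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints ff02::1:ff34:5678 */
	return 0;
}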