author     Simon Horman <horms@verge.net.au>   2013-09-24 16:44:06 +0900
committer  Jesse Gross <jesse@nicira.com>      2013-09-25 18:19:00 -0700
commit     c1f90e07a6733dd4399bbb47ed6d63c6fa07573f (patch)
tree       201cf9f4413775acd2968b0d241e5fddb887da60 /datapath/linux/compat/gso.c
parent     6b30e53e6290b43e60c7c6f64f568a1d17961920 (diff)
download   openvswitch-c1f90e07a6733dd4399bbb47ed6d63c6fa07573f.tar.gz
datapath: Move segmentation compatibility code into a compatibility function
Move segmentation compatibility code out of netdev_send and into
rpl_dev_queue_xmit(), a compatibility function used in place
of dev_queue_xmit() as necessary.
As suggested by Jesse Gross.
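
For context, a minimal sketch of how such a replacement is typically wired
up in the compat headers; the exact header and guard are assumptions, not
taken from this patch. On affected kernels a macro points callers of
dev_queue_xmit() at the replacement, which is why rpl_dev_queue_xmit()
itself must #undef the macro before calling the real function:

/* Hypothetical compat-header sketch (assumed names): redirect callers of
 * dev_queue_xmit() to the replacement on kernels that need the workaround. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
int rpl_dev_queue_xmit(struct sk_buff *skb);
#define dev_queue_xmit rpl_dev_queue_xmit
#endif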
Some minor though verbose implementation notes:
* This rpl_dev_queue_xmit() endeavours to return a valid error code or
zero on success as per dev_queue_xmit(). The exception is that when
dev_queue_xmit() is called in a loop, only the status of the last call
is taken into account; any errors returned by earlier calls are ignored.
This behaviour is carried over from the previous implementation, where
netdev_send() called dev_queue_xmit() in a loop and ignored its return
value entirely.
* netdev_send() continues to ignore the return value of dev_queue_xmit().
So the discussion of the return value of rpl_dev_queue_xmit()
above has no bearing on run-time behaviour.
* The return value of netdev_send() may differ from the previous
implementation in the case where segmentation is performed before
calling the real dev_queue_xmit(). This is because previously in
this case netdev_send() would return the combined length of the
skbs resulting from segmentation, whereas the current code always
returns the length of the original skb (see the sketch below).
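
For illustration only, a minimal caller-side sketch of the behaviour the
last note describes; netdev_send() lives elsewhere in the datapath and the
function name below is a simplified assumption, not code from this patch:

/* Illustrative sketch, not part of this patch: the caller records the
 * length of the original skb and ignores the xmit return value, so the
 * per-segment statuses described above never reach it. */
static int example_netdev_send(struct sk_buff *skb)
{
        int len = skb->len;     /* length of the original, unsegmented skb */

        dev_queue_xmit(skb);    /* return value deliberately ignored; on
                                 * older kernels this resolves to
                                 * rpl_dev_queue_xmit() */
        return len;             /* reported even if segmentation occurred */
}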
Signed-off-by: Simon Horman <horms@verge.net.au>
[jesse: adjust error path in netdev_send() to match upstream]
Signed-off-by: Jesse Gross <jesse@nicira.com>
Diffstat (limited to 'datapath/linux/compat/gso.c')
-rw-r--r--  datapath/linux/compat/gso.c  |  80
1 file changed, 80 insertions, 0 deletions
diff --git a/datapath/linux/compat/gso.c b/datapath/linux/compat/gso.c
index 30332a2dc..32f906c82 100644
--- a/datapath/linux/compat/gso.c
+++ b/datapath/linux/compat/gso.c
@@ -36,6 +36,86 @@
 #include "gso.h"
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) && \
+    !defined(HAVE_VLAN_BUG_WORKAROUND)
+#include <linux/module.h>
+
+static int vlan_tso __read_mostly;
+module_param(vlan_tso, int, 0644);
+MODULE_PARM_DESC(vlan_tso, "Enable TSO for VLAN packets");
+#else
+#define vlan_tso true
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
+static bool dev_supports_vlan_tx(struct net_device *dev)
+{
+#if defined(HAVE_VLAN_BUG_WORKAROUND)
+        return dev->features & NETIF_F_HW_VLAN_TX;
+#else
+        /* Assume that the driver is buggy. */
+        return false;
+#endif
+}
+
+int rpl_dev_queue_xmit(struct sk_buff *skb)
+{
+#undef dev_queue_xmit
+        int err = -ENOMEM;
+
+        if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
+                int features;
+
+                features = netif_skb_features(skb);
+
+                if (!vlan_tso)
+                        features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
+                                      NETIF_F_UFO | NETIF_F_FSO);
+
+                skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
+                if (unlikely(!skb))
+                        return err;
+                vlan_set_tci(skb, 0);
+
+                if (netif_needs_gso(skb, features)) {
+                        struct sk_buff *nskb;
+
+                        nskb = skb_gso_segment(skb, features);
+                        if (!nskb) {
+                                if (unlikely(skb_cloned(skb) &&
+                                             pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+                                        goto drop;
+
+                                skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
+                                goto xmit;
+                        }
+
+                        if (IS_ERR(nskb)) {
+                                err = PTR_ERR(nskb);
+                                goto drop;
+                        }
+                        consume_skb(skb);
+                        skb = nskb;
+
+                        do {
+                                nskb = skb->next;
+                                skb->next = NULL;
+                                err = dev_queue_xmit(skb);
+                                skb = nskb;
+                        } while (skb);
+
+                        return err;
+                }
+        }
+xmit:
+        return dev_queue_xmit(skb);
+
+drop:
+        kfree_skb(skb);
+        return err;
+}
+#endif /* kernel version < 2.6.37 */
+
 static __be16 __skb_network_protocol(struct sk_buff *skb)
 {
         __be16 type = skb->protocol;