From 85831b0913b72b58037c76c61ee044cf9deb240e Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Fri, 30 Mar 2018 20:48:23 +0300 Subject: [PATCH] ixgbe: Add support for Double VLAN mode There is no practical limit on the number of stacked VLANs in Linux, which means multiple VLAN headers can be present in an Ethernet frame. However, a stacking depth greater than two is uncommon; two tags is the case covered by the 802.1ad (QinQ) protocol. By default the driver provides hardware offloading only for one VLAN header: it supports filtering by VLAN id (vid), stripping on receive and insertion on transmit, as well as skipping the header so that offloads for the next protocol still apply. With stacked VLANs the next protocol is again VLAN, which in turn is not known to the hardware without additional configuration. As a result the hardware provides no further offloads such as receive hash calculation or directing the packet to a specific receive queue; the latter means RSS does not work for packets with multiple VLAN headers. The hardware supports double VLANs when configured in a specific mode called Global Double VLAN in the "Intel(R) 82599 10 GbE Controller Datasheet". This change implements support for that mode in the ixgbe driver. Signed-off-by: Serhey Popovych --- ixgbe.7 | 14 ++ src/ixgbe.h | 11 +- src/ixgbe_ethtool.c | 88 +++++++++++- src/ixgbe_main.c | 297 ++++++++++++++++++++++++++++++++++------ src/ixgbe_param.c | 66 +++++++++ src/ixgbe_sriov.c | 4 + src/ixgbe_txrx_common.h | 2 +- src/ixgbe_type.h | 3 + src/ixgbe_xsk.c | 4 +- 9 files changed, 444 insertions(+), 45 deletions(-) diff --git a/ixgbe.7 b/ixgbe.7 index af59b44..765f816 100644 --- a/ixgbe.7 +++ b/ixgbe.7 @@ -209,6 +209,20 @@ This parameter is only relevant for devices operating in SR-IOV mode. When this parameter is set, the driver detects malicious VF driver and disables its Tx/Rx queues until a VF driver reset occurs. .LP +.B DV (Double VLAN) +.IP +.B Valid Range: 0-1 +.IP +0 = Disabled +.IP +1 = Enabled +.IP +Controls Double VLAN mode on the device. If enabled, the hardware assumes that +at least a single VLAN header is present in the packet buffer, and the second +header is skipped so that the rest of the L2/L3 offload processing (e.g. RSS) +still applies. It is disabled by default since VID filtering, if enabled, is +done in software by the driver and is not compatible with VMDq. +.LP .B Jumbo Frames .IP Jumbo Frames support is enabled by changing the Maximum Transmission Unit (MTU) to a value larger than the default value of 1500.
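As an illustration of the parsing that Global Double VLAN mode moves into hardware, the sketch below walks the tag stack of a raw frame in user space and returns the L3 offset that receive hashing depends on. It is only a hedged, standalone example: the helper name, the sample frame and the constants are assumptions made for this illustration and are not part of the driver.

#include <stddef.h>
#include <stdint.h>

#define ETH_ALEN	6
#define VLAN_HLEN	4		/* TPID (2 bytes) + TCI (2 bytes) */
#define ETH_P_8021Q	0x8100		/* C-tag */
#define ETH_P_8021AD	0x88A8		/* S-tag used by 802.1ad/QinQ */

/* Return the offset of the L3 header, stepping over stacked VLAN tags. */
static size_t l3_offset(const uint8_t *frame, size_t len)
{
	size_t off = 2 * ETH_ALEN;	/* EtherType follows dst + src MAC */

	while (off + 2 <= len) {
		uint16_t proto = (uint16_t)((frame[off] << 8) | frame[off + 1]);

		if (proto != ETH_P_8021Q && proto != ETH_P_8021AD)
			break;		/* not a VLAN TPID: L3 starts after it */
		off += VLAN_HLEN;	/* skip TPID + TCI, check next EtherType */
	}
	return off + 2;			/* step over the final EtherType itself */
}

int main(void)
{
	/* dst MAC, src MAC, S-tag (VID 100), C-tag (VID 200), ETH_P_IP */
	static const uint8_t qinq[] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02,
		0x88, 0xa8, 0x00, 0x64,
		0x81, 0x00, 0x00, 0xc8,
		0x08, 0x00,
	};

	return l3_offset(qinq, sizeof(qinq)) == 22 ? 0 : 1;
}

With no tag the function returns 14, with a single tag 18 and with QinQ 22; as the commit message notes, without additional configuration the hardware stops parsing after the first VLAN header, so it never reaches the QinQ offset and RSS does not work for such packets.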
diff --git a/src/ixgbe.h b/src/ixgbe.h index 4c6cda8..ff9f88a 100644 --- a/src/ixgbe.h +++ b/src/ixgbe.h @@ -906,7 +906,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 17) #define IXGBE_FLAG2_VLAN_PROMISC (u32)(1 << 18) #define IXGBE_FLAG2_RX_LEGACY (u32)(1 << 19) -#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(20) +#define IXGBE_FLAG2_AUTO_DISABLE_VF (u32)(1 << 20) +#define IXGBE_FLAG2_VLAN_STAG_RX (u32)(1 << 21) +#define IXGBE_FLAG2_VLAN_STAG_FILTER (u32)(1 << 22) /* Tx fast path data */ int num_tx_queues; @@ -1275,6 +1277,13 @@ void ixgbe_disable_tx_queue(struct ixgbe_adapter *adapter); #ifdef ETHTOOL_OPS_COMPAT int ethtool_ioctl(struct ifreq *ifr); #endif +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +u32 ixgbe_vlan_double_fix_features(struct net_device *netdev, u32 features); +#else +netdev_features_t ixgbe_vlan_double_fix_features(struct net_device *netdev, + netdev_features_t features); +#endif + #if IS_ENABLED(CONFIG_FCOE) void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); diff --git a/src/ixgbe_ethtool.c b/src/ixgbe_ethtool.c index c18fe85..c152e30 100644 --- a/src/ixgbe_ethtool.c +++ b/src/ixgbe_ethtool.c @@ -187,6 +187,10 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #endif #define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2) "mdd-disable-vf", +#define IXGBE_PRIV_FLAGS_VLAN_STAG_RX BIT(3) + "vlan-stag-rx", +#define IXGBE_PRIV_FLAGS_VLAN_STAG_FILTER BIT(4) + "vlan-stag-filter", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3390,7 +3394,7 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data) static int ixgbe_set_flags(struct net_device *netdev, u32 data) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; + u32 supported_flags = ETH_FLAG_RXVLAN; u32 changed = netdev->features ^ data; bool need_reset = false; int rc; @@ -3402,6 +3406,8 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data) #endif if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) supported_flags |= ETH_FLAG_LRO; + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX)) + supported_flags |= ETH_FLAG_TXVLAN; #ifdef ETHTOOL_GRXRINGS switch (adapter->hw.mac.type) { @@ -4678,6 +4684,12 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF; + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) + priv_flags |= IXGBE_PRIV_FLAGS_VLAN_STAG_RX; + + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_FILTER) + priv_flags |= IXGBE_PRIV_FLAGS_VLAN_STAG_FILTER; + return priv_flags; } @@ -4694,6 +4706,8 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) IXGBE_SPF_NONE = 0, IXGBE_SPF_REINIT_LOCKED = (1 << 0), IXGBE_SPF_RESET = (1 << 1), + IXGBE_SPF_SET_RX_MODE = (1 << 2), + IXGBE_SPF_SET_FEATURES = (1 << 3), } do_reset = IXGBE_SPF_NONE; if (!changed) @@ -4749,14 +4763,84 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) do_reset |= IXGBE_SPF_REINIT_LOCKED; } + /* Global Double VLAN features handling */ + if (changed & IXGBE_PRIV_FLAGS_VLAN_STAG_RX) { + struct ixgbe_hw *hw = &adapter->hw; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + /* VMDq requires vlan filtering to be enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) && + (priv_flags & IXGBE_PRIV_FLAGS_VLAN_STAG_RX)) { + /* Turn on STAG filter by default: user might + * turn it
off later if required. + */ + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX)) { +#ifndef HAVE_VLAN_RX_REGISTER + adapter->flags2 |= + (IXGBE_FLAG2_VLAN_STAG_RX| + IXGBE_FLAG2_VLAN_STAG_FILTER); +#else + /* No filtering by driver by default: there + * are setups with old kernels where filtering + * might be broken (e.g. vlan on top of macvlan) + */ + adapter->flags2 |= + (IXGBE_FLAG2_VLAN_STAG_RX); +#endif + } + break; + } + /* fall thru */ + default: + /* No filtering by outer tag without outer VLAN + * header acceleration on receive. + */ + adapter->flags2 &= ~(IXGBE_FLAG2_VLAN_STAG_RX | + IXGBE_FLAG2_VLAN_STAG_FILTER); + } + + changed &= ~IXGBE_PRIV_FLAGS_VLAN_STAG_FILTER; + do_reset |= IXGBE_SPF_SET_FEATURES; + } + + if (changed & IXGBE_PRIV_FLAGS_VLAN_STAG_FILTER) { + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) { + adapter->flags2 ^= IXGBE_FLAG2_VLAN_STAG_FILTER; + do_reset |= IXGBE_SPF_SET_FEATURES; + } + } + + if (do_reset & IXGBE_SPF_SET_FEATURES) { + typeof(netdev->features) features = netdev->features; + + features = ixgbe_vlan_double_fix_features(netdev, features); + if (features != netdev->features) { + netdev->features = features; + netdev_features_change(netdev); + } + + do_reset |= IXGBE_SPF_SET_RX_MODE; + } + if (do_reset & IXGBE_SPF_RESET) { ixgbe_do_reset(netdev); + do_reset &= ~IXGBE_SPF_SET_RX_MODE; } else if (do_reset & IXGBE_SPF_REINIT_LOCKED) { /* reset interface to repopulate queues */ - if (netif_running(netdev)) + if (netif_running(netdev)) { ixgbe_reinit_locked(adapter); + do_reset &= ~IXGBE_SPF_SET_RX_MODE; + } } + if (do_reset & IXGBE_SPF_SET_RX_MODE) + ixgbe_set_rx_mode(netdev); + return 0; } diff --git a/src/ixgbe_main.c b/src/ixgbe_main.c index 02c57dd..d77c6fe 100644 --- a/src/ixgbe_main.c +++ b/src/ixgbe_main.c @@ -1497,22 +1497,109 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, IXGBE_CB(skb)->append_cnt = 0; } -static void ixgbe_rx_vlan(struct ixgbe_ring *ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) +static inline bool ixgbe_vlan_tag_present(struct sk_buff *skb) { - struct net_device *netdev = ring->netdev; - struct ixgbe_adapter __maybe_unused *adapter = netdev_priv(netdev); - netdev_features_t __maybe_unused features = netdev->features; +#ifdef HAVE_VLAN_RX_REGISTER + return IXGBE_CB(skb)->vid != 0; +#else + return skb_vlan_tag_present(skb); +#endif +} + +static inline void __ixgbe_hwaccel_put_tag(struct sk_buff *skb, + __be16 protocol, u16 vlan_tci) +{ +#ifdef HAVE_VLAN_RX_REGISTER +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + IXGBE_CB(skb)->vid = vlan_tci; +#else + __vlan_hwaccel_put_tag(skb, protocol, vlan_tci); +#endif +} + +static inline bool ixgbe_vlan_double_filter(struct net_device *netdev, u16 tci) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + u16 vid = tci & VLAN_VID_MASK; + + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_FILTER)) + return false; + + if (netdev->features & NETIF_F_RXALL) + return false; + + if (netdev->flags & IFF_PROMISC) + return false; + +#ifdef HAVE_VLAN_RX_REGISTER + return !adapter->vlgrp || !vlan_group_get_device(adapter->vlgrp, vid); +#else + return !test_bit(vid, adapter->active_vlans); +#endif +} + +static void ixgbe_rx_vlan_untag(struct sk_buff *skb) +{ +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + const u32 *a; + u32 *b; + + BUILD_BUG_ON(sizeof(u32) != VLAN_HLEN || + 2 * ETH_ALEN / sizeof(u32) != 3); + + a = (void *)skb->data + 2 * ETH_ALEN; + b = (void *)skb->data + 2 * ETH_ALEN + VLAN_HLEN; + + *--b = *--a; + *--b = *--a; + *--b = 
*--a; +#else + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); +#endif + /* No checksum update is needed here since the driver never returns + * CHECKSUM_COMPLETE, so just pull VLAN_HLEN from the skb here. + */ + __skb_pull(skb, VLAN_HLEN); +} + +static bool ixgbe_process_vlans(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct net_device *netdev = rx_ring->netdev; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)skb->data; + __be16 protocol; + u16 tci; int enable; + if (WARN_ON_ONCE(ixgbe_vlan_tag_present(skb))) + return false; + + enable = !!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX); + + if (enable && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VEXT)) { + protocol = vhdr->h_vlan_proto; + tci = ntohs(vhdr->h_vlan_TCI); + if (ixgbe_vlan_double_filter(netdev, tci)) { + dev_kfree_skb_any(skb); + return true; + } +#ifdef HAVE_VLAN_RX_REGISTER + if (adapter->vlgrp) +#endif + __ixgbe_hwaccel_put_tag(skb, protocol, tci); + } + #ifdef HAVE_VLAN_RX_REGISTER enable = !!adapter->vlgrp; #if defined(HAVE_NDO_SET_FEATURES) || defined(ETHTOOL_GFLAGS) #ifdef NETIF_F_HW_VLAN_CTAG_RX - enable &= !!(features & NETIF_F_HW_VLAN_CTAG_RX); + enable &= !!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX); #else - enable &= !!(features & NETIF_F_HW_VLAN_RX); + enable &= !!(netdev->features & NETIF_F_HW_VLAN_RX); #endif #endif /* HAVE_NDO_SET_FEATURES || ETHTOOL_GFLAGS */ #if IS_ENABLED(CONFIG_DCB) @@ -1520,22 +1607,27 @@ static void ixgbe_rx_vlan(struct ixgbe_ring *ring, #endif /* CONFIG_DCB */ #else /* !HAVE_VLAN_RX_REGISTER */ #ifdef NETIF_F_HW_VLAN_CTAG_RX - enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + enable = !!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX); #else - enable = !!(features & NETIF_F_HW_VLAN_RX); + enable = !!(netdev->features & NETIF_F_HW_VLAN_RX); #endif #endif /* HAVE_VLAN_RX_REGISTER */ - if (enable && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) -#ifndef HAVE_VLAN_RX_REGISTER - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - le16_to_cpu(rx_desc->wb.upper.vlan)); -#else - IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); - else - IXGBE_CB(skb)->vid = 0; -#endif + if (enable && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { + protocol = htons(ETH_P_8021Q); + tci = le16_to_cpu(rx_desc->wb.upper.vlan); + if (ixgbe_vlan_tag_present(skb)) { + vhdr->h_vlan_proto = protocol; + vhdr->h_vlan_TCI = htons(tci); + } else { + __ixgbe_hwaccel_put_tag(skb, protocol, tci); + } + } else { + if (ixgbe_vlan_tag_present(skb)) + ixgbe_rx_vlan_untag(skb); + } + + return false; } /** @@ -1547,8 +1639,10 @@ static void ixgbe_rx_vlan(struct ixgbe_ring *ring, * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, timestamp, protocol, and * other fields within the skb. + * + * Returns true if an error was encountered and skb was freed.
**/ -void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, +bool ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { @@ -1556,6 +1650,9 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, u32 flags = rx_ring->q_vector->adapter->flags; #endif + if (ixgbe_process_vlans(rx_ring, rx_desc, skb)) + return true; + ixgbe_update_rsc_stats(rx_ring, skb); #ifdef NETIF_F_RXHASH @@ -1568,10 +1665,10 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); #endif - ixgbe_rx_vlan(rx_ring, rx_desc, skb); - skb_record_rx_queue(skb, ring_queue_index(rx_ring)); skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); + + return false; } void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, @@ -2355,12 +2452,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb)) continue; - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - /* populate checksum, timestamp, VLAN, and protocol */ - ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + if (ixgbe_process_skb_fields(rx_ring, rx_desc, skb)) + continue; + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; #if IS_ENABLED(CONFIG_FCOE) /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { @@ -2519,12 +2616,12 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, continue; } - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - /* populate checksum, timestamp, VLAN, and protocol */ - ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + if (ixgbe_process_skb_fields(rx_ring, rx_desc, skb)) + continue; + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; #if IS_ENABLED(CONFIG_FCOE) /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { @@ -5034,6 +5131,49 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) #endif } +/** + * ixgbe_vlan_double_enable - helper to enable second vlan tag processing + * @adapter: driver data + */ +static void ixgbe_vlan_double_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl_ext, dmatxctl, exvet; + __le16 vlan_proto = cpu_to_le16(ETH_P_8021Q); + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + exvet = IXGBE_READ_REG(hw, IXGBE_EXVET); + + ctrl_ext |= IXGBE_CTRL_EXT_VLAN; + dmatxctl |= IXGBE_DMATXCTL_GDV; + exvet &= ~(0xffff << IXGBE_EXVET_VET_EXT_SHIFT); + exvet |= (__force u32)vlan_proto << IXGBE_EXVET_VET_EXT_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_EXVET, exvet); + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); +} + +/** + * ixgbe_vlan_double_disable - helper to disable second vlan tag processing + * @adapter: driver data + */ +static void ixgbe_vlan_double_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl_ext, dmatxctl; + + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + + ctrl_ext &= ~IXGBE_CTRL_EXT_VLAN; + dmatxctl &= ~IXGBE_DMATXCTL_GDV; + + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); +} + /** * ixgbe_vlan_strip_disable - helper to disable vlan tag stripping * @adapter: driver data @@ -5288,6 +5428,15 @@ void ixgbe_vlan_mode(struct 
net_device *netdev, netdev_features_t features) } #endif /* !HAVE_NDO_SET_FEATURES && !ETHTOOL_GFLAGS */ #endif /* HAVE_VLAN_RX_REGISTER */ + + enable = !!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX); + + if (enable) + /* enable SVLAN tag processing */ + ixgbe_vlan_double_enable(adapter); + else + /* disable SVLAN tag processing */ + ixgbe_vlan_double_disable(adapter); } static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) @@ -5643,19 +5792,30 @@ void ixgbe_set_rx_mode(struct net_device *netdev) #endif /* HAVE_VLAN_RX_REGISTER */ } else { #ifdef HAVE_VLAN_RX_REGISTER - int enable; + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX)) { + int enable; - enable = !!adapter->vlgrp; + enable = !!adapter->vlgrp; #ifdef HAVE_NDO_SET_FEATURES #ifdef NETIF_F_HW_VLAN_CTAG_RX - enable &= !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + enable &= !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); #else - enable &= !!(features & NETIF_F_HW_VLAN_FILTER); + enable &= !!(features & NETIF_F_HW_VLAN_FILTER); #endif #endif /* HAVE_NDO_SET_FEATURES */ - if (enable) - /* enable hardware vlan filtering */ - vlnctrl |= IXGBE_VLNCTRL_VFE; + if (enable) + /* enable hardware vlan filtering */ + vlnctrl |= IXGBE_VLNCTRL_VFE; + } +#else /* HAVE_VLAN_RX_REGISTER */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) { +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_FILTER + features &= ~NETIF_F_HW_VLAN_FILTER; +#endif + } #endif /* HAVE_VLAN_RX_REGISTER */ if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; @@ -11504,6 +11664,61 @@ void ixgbe_do_reset(struct net_device *netdev) ixgbe_reset(adapter); } +#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT +u32 ixgbe_vlan_double_fix_features(struct net_device *netdev, u32 features) +#else +netdev_features_t ixgbe_vlan_double_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) { + /* User might turn off VLAN header stripping + * by hardware: the receive routine will pop + * the outer tag in this case. There is a negligible + * performance penalty for this. + */ + + /* Turn off VLAN header insertion by hardware + * and let the network stack push them in the correct + * order: otherwise hardware expects to find the + * outer header in the transmit buffer, which isn't + * valid for the encapsulation process for stacked + * vlans. + */ +#ifdef NETIF_F_HW_VLAN_CTAG_TX + features &= ~NETIF_F_HW_VLAN_CTAG_TX; +#endif +#ifdef NETIF_F_HW_VLAN_TX + features &= ~NETIF_F_HW_VLAN_TX; +#endif +#ifdef NETIF_F_HW_VLAN_STAG_RX + features |= NETIF_F_HW_VLAN_STAG_RX; + } else { + features &= ~NETIF_F_HW_VLAN_STAG_RX; +#endif + } + + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_FILTER) { + /* vlan_hw_filter_capable() has a fixed mapping + * between STAG/CTAG and the VLAN Ethernet Type: + * to support STAG filtering for the ETH_P_8021Q + * type we need to enable CTAG filters too.
+ */ +#ifdef NETIF_F_HW_VLAN_CTAG_FILTER + features |= NETIF_F_HW_VLAN_CTAG_FILTER; +#endif +#ifdef NETIF_F_HW_VLAN_STAG_FILTER + features |= NETIF_F_HW_VLAN_STAG_FILTER; + } else { + features &= ~NETIF_F_HW_VLAN_STAG_FILTER; +#endif + } + + return features; +} + #ifdef HAVE_NDO_SET_FEATURES #ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT static u32 ixgbe_fix_features(struct net_device *netdev, u32 features) @@ -11536,7 +11751,7 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, features &= ~NETIF_F_LRO; } - return features; + return ixgbe_vlan_double_fix_features(netdev, features); } #ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT @@ -13065,8 +13280,10 @@ static int ixgbe_probe(struct pci_dev *pdev, netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); #endif - #endif + netdev->features = + ixgbe_vlan_double_fix_features(netdev, netdev->features); + #if IS_ENABLED(CONFIG_DCB) if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) netdev->dcbnl_ops = &ixgbe_dcbnl_ops; diff --git a/src/ixgbe_param.c b/src/ixgbe_param.c index 71197b7..b98a15b 100644 --- a/src/ixgbe_param.c +++ b/src/ixgbe_param.c @@ -327,6 +327,13 @@ IXGBE_PARAM(dmac_watchdog, IXGBE_PARAM(vxlan_rx, "VXLAN receive checksum offload (0,1), default 1 = Enable"); +/* Enable/disable Double VLAN support + * + * Valid Range: 0, 1 + * + * Default Value: 0 + */ +IXGBE_PARAM(DV, "Enable/disable Double VLAN support on 82599+ adapters, default 0 = Disable"); struct ixgbe_option { enum { enable_option, range_option, list_option } type; @@ -1266,4 +1273,63 @@ void ixgbe_check_options(struct ixgbe_adapter *adapter) break; } } + { /* DV - Enable Double VLAN support */ +#ifndef HAVE_VLAN_RX_REGISTER + const u32 vlan_stag_rx = IXGBE_FLAG2_VLAN_STAG_RX| + IXGBE_FLAG2_VLAN_STAG_FILTER; +#else + /* No filtering by driver by default: there + * are setups with old kernels where filtering + * might be broken (e.g. vlan on top of macvlan) + */ + const u32 vlan_stag_rx = IXGBE_FLAG2_VLAN_STAG_RX; +#endif + + adapter->flags2 &= ~vlan_stag_rx; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: { + struct ixgbe_option opt = { + .type = enable_option, + .name = "Double VLAN", + .err = "defaulting to 0 (disabled)", + .def = OPTION_DISABLED, + }; + +#ifdef module_param_array + if (num_DV > bd) { +#endif + unsigned int dv = DV[bd]; + + ixgbe_validate_option(adapter->netdev, + &dv, &opt); + if (dv) + adapter->flags2 |= vlan_stag_rx; +#ifdef module_param_array + } else { + if (opt.def == OPTION_ENABLED) + adapter->flags2 |= vlan_stag_rx; + } +#endif + } + break; + default: + break; + } + + /* Check Interoperability */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) { + if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { + DPRINTK(PROBE, INFO, + "Double VLAN is not supported while VMDq " + "enabled. 
" + "Disabling Double VLAN.\n"); + adapter->flags2 &= ~vlan_stag_rx; + } + } + } } diff --git a/src/ixgbe_sriov.c b/src/ixgbe_sriov.c index c6622c3..218cc8c 100644 --- a/src/ixgbe_sriov.c +++ b/src/ixgbe_sriov.c @@ -58,6 +58,10 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, } #endif /* HAVE_XDP_SUPPORT */ + /* VMDq requires vlan filtering to be enbled */ + if (adapter->flags2 & IXGBE_FLAG2_VLAN_STAG_RX) + return -EINVAL; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; /* Enable VMDq flag so device will be set in VM mode */ diff --git a/src/ixgbe_txrx_common.h b/src/ixgbe_txrx_common.h index 0b3a3de..596349d 100644 --- a/src/ixgbe_txrx_common.h +++ b/src/ixgbe_txrx_common.h @@ -64,7 +64,7 @@ bool ixgbe_xsk_any_rx_ring_enabled(struct ixgbe_adapter *adapter); bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); -void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, +bool ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, diff --git a/src/ixgbe_type.h b/src/ixgbe_type.h index d85bd9b..638240f 100644 --- a/src/ixgbe_type.h +++ b/src/ixgbe_type.h @@ -544,6 +544,8 @@ struct ixgbe_nvm_version { #define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ +#define IXGBE_EXVET_VET_EXT_SHIFT 16 /* Global Double VLAN EtherType */ + #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ /* Anti-spoofing defines */ @@ -1456,6 +1458,7 @@ struct ixgbe_dmac_config { #define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ #define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ #define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_VLAN 0x04000000 /* Extended VLAN */ #define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ /* Direct Cache Access (DCA) definitions */ diff --git a/src/ixgbe_xsk.c b/src/ixgbe_xsk.c index 73620d1..cce22db 100644 --- a/src/ixgbe_xsk.c +++ b/src/ixgbe_xsk.c @@ -749,10 +749,12 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, if (eth_skb_pad(skb)) continue; + if (ixgbe_process_skb_fields(rx_ring, rx_desc, skb)) + continue; + total_rx_bytes += skb->len; total_rx_packets++; - ixgbe_process_skb_fields(rx_ring, rx_desc, skb); ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); }