More features may be added in future firmware and new versions of the VIC.
Please refer to the release notes.
+.. _overlay_offload:
+
+Overlay Offload
+---------------
+
+Recent hardware models support overlay offload. When enabled, the NIC performs
+the following operations for VXLAN, NVGRE, and GENEVE packets. In all cases,
+inner and outer packets can be IPv4 or IPv6.
+
+- TSO for VXLAN and GENEVE packets (see the Tx example at the end of this
+  section).
+
+ Hardware supports NVGRE TSO, but DPDK currently has no NVGRE offload flags.
+
+- Tx checksum offloads.
+
+ The NIC fills in IPv4/UDP/TCP checksums for both inner and outer packets.
+
+- Rx checksum offloads.
+
+ The NIC validates IPv4/UDP/TCP checksums of both inner and outer packets.
+ Good checksum flags (e.g. ``PKT_RX_L4_CKSUM_GOOD``) indicate that the inner
+ packet has the correct checksum, and if applicable, the outer packet also
+ has the correct checksum. Bad checksum flags (e.g. ``PKT_RX_L4_CKSUM_BAD``)
+ indicate that the inner and/or outer packets have invalid checksum values.
+
+- Inner Rx packet type classification
+
+ The PMD sets inner L3/L4 packet types (e.g. ``RTE_PTYPE_INNER_L4_TCP``) and
+ ``RTE_PTYPE_TUNNEL_GRENAT`` to indicate that the packet is tunneled. It does
+ not set L3/L4 packet types for the outer headers (see the Rx example after
+ this list).
+
+- Inner RSS
+
+ RSS hash calculation, and therefore queue selection, is done on the inner
+ packet.
+
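+For illustration only, an application might check these Rx results using the
+generic mbuf API. Nothing below is enic-specific, and the helper is
+hypothetical:
+
+.. code-block:: c
+
+    #include <rte_mbuf.h>
+
+    /* Return 1 for a tunneled packet whose inner TCP checksum is good */
+    static int
+    rx_is_good_tunneled_tcp(const struct rte_mbuf *m)
+    {
+        /* VXLAN/NVGRE/GENEVE packets carry RTE_PTYPE_TUNNEL_GRENAT */
+        if (!(m->packet_type & RTE_PTYPE_TUNNEL_MASK))
+            return 0;
+        /* Inner packet types describe the encapsulated packet */
+        if ((m->packet_type & RTE_PTYPE_INNER_L4_MASK) !=
+            RTE_PTYPE_INNER_L4_TCP)
+            return 0;
+        /* Good L4 checksum flags cover inner (and, if applicable, outer) */
+        return (m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_GOOD;
+    }
+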
+To enable overlay offload, check the 'Enable VXLAN' box in CIMC or UCSM and
+then reboot the server. When the PMD successfully enables overlay offload, it
+prints the following message on the console.
+
+.. code-block:: console
+
+ Overlay offload is enabled
+
+By default, the PMD enables overlay offload if the hardware supports it. To
+disable it, set the ``devargs`` parameter ``disable-overlay=1``. For example::
+
+ -w 12:00.0,disable-overlay=1
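+
+The Tx offloads listed above are requested through the standard mbuf fields.
+As an illustration only, and assuming untagged Ethernet on both the outer and
+inner layers, a VXLAN-encapsulated IPv4/TCP packet might be prepared for TSO
+roughly as follows (the helper name is hypothetical):
+
+.. code-block:: c
+
+    #include <rte_mbuf.h>
+    #include <rte_ether.h>
+    #include <rte_ip.h>
+    #include <rte_udp.h>
+    #include <rte_tcp.h>
+
+    static void
+    request_vxlan_tso(struct rte_mbuf *m, uint16_t mss)
+    {
+        m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_OUTER_IPV4 |
+                       PKT_TX_OUTER_IP_CKSUM | PKT_TX_IPV4 |
+                       PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
+        m->outer_l2_len = sizeof(struct ether_hdr);
+        m->outer_l3_len = sizeof(struct ipv4_hdr);
+        /* For tunneled packets, l2_len spans outer UDP + VXLAN + inner L2 */
+        m->l2_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
+                    sizeof(struct ether_hdr);
+        m->l3_len = sizeof(struct ipv4_hdr);  /* inner IPv4 header */
+        m->l4_len = sizeof(struct tcp_hdr);   /* inner TCP header */
+        m->tso_segsz = mss;
+    }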
+
.. _enic_limitations:
Limitations
- MTU update
- SR-IOV on UCS managed servers connected to Fabric Interconnects
- Flow API
+- Overlay offload
+
+ - Rx/Tx checksum offloads for VXLAN, NVGRE, GENEVE
+ - TSO for VXLAN and GENEVE packets
+ - Inner RSS
Known bugs and unsupported features in this release
---------------------------------------------------
return ret;
}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+ u64 a0 = VIC_FEATURE_VXLAN;
+ u64 a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+ return ret == 0 &&
+ (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+ (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2);
-#ifdef ENIC_VXLAN
-int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
u8 overlay, u8 config);
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
-#endif
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
VIC_FEATURE_MAX,
} vic_feature_t;
+/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate to the host driver which VXLAN and multi-WQ features
+ * are supported.
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
/*
* CMD_CONFIG_GRPINTR subcommands
*/
struct vnic_wq {
unsigned int index;
+ uint64_t tx_offload_notsup_mask;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
+#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>
struct vnic_dev *vdev;
unsigned int port_id;
+ bool overlay_offload;
struct rte_eth_dev *rte_dev;
struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
u8 adv_filters;
u32 flow_filter_mode;
u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable-overlay=1 */
unsigned int flags;
unsigned int priv_flags;
uint64_t rss_hf; /* ETH_RSS flags */
union vnic_rss_key rss_key;
union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
/* Compute ethdev's max packet size from MTU */
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
{.vendor_id = 0, /* sentinel */},
};
-#define ENIC_TX_OFFLOAD_CAPA ( \
- DEV_TX_OFFLOAD_VLAN_INSERT | \
- DEV_TX_OFFLOAD_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_UDP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_CKSUM | \
- DEV_TX_OFFLOAD_TCP_TSO)
-
-#define ENIC_RX_OFFLOAD_CAPA ( \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_IPV4_CKSUM | \
- DEV_RX_OFFLOAD_UDP_CKSUM | \
- DEV_RX_OFFLOAD_TCP_CKSUM)
+#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
RTE_INIT(enicpmd_init_log);
static void
*/
device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
- device_info->rx_offload_capa = ENIC_RX_OFFLOAD_CAPA;
- device_info->tx_offload_capa = ENIC_TX_OFFLOAD_CAPA;
+ device_info->rx_offload_capa = enic->rx_offload_capa;
+ device_info->tx_offload_capa = enic->tx_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
* Except VLAN stripping (port setting), all the checksum offloads
* are always enabled.
*/
- conf->offloads = ENIC_RX_OFFLOAD_CAPA;
+ conf->offloads = enic->rx_offload_capa;
if (!enic->ig_vlan_strip_en)
conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* rx_thresh and other fields are not applicable for enic */
ENICPMD_FUNC_TRACE();
qinfo->nb_desc = enic->config.wq_desc_count;
memset(&qinfo->conf, 0, sizeof(qinfo->conf));
- qinfo->conf.offloads = ENIC_TX_OFFLOAD_CAPA; /* not configurable */
+ qinfo->conf.offloads = enic->tx_offload_capa;
/* tx_thresh, and all the other fields are not applicable for enic */
}
.rss_hash_update = enicpmd_dev_rss_hash_update,
};
+static int enic_parse_disable_overlay(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "0") == 0) {
+ enic->disable_overlay = false;
+ } else if (strcmp(value, "1") == 0) {
+ enic->disable_overlay = true;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
+ ": expected=0|1 given=%s\n", value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_DISABLE_OVERLAY, NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->disable_overlay = false;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+ enic_parse_disable_overlay, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
addr->domain, addr->bus, addr->devid, addr->function);
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
return enic_probe(enic);
}
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_DISABLE_OVERLAY "=<0|1> ");
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ dev_info(enic, "Overlay offload is enabled\n");
+ }
+
return 0;
}
if (!ENIC_SETTING(enic, RSS))
enic->flow_type_rss_offloads = 0;
+ enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+ vnic_dev_capable_vxlan(enic->vdev);
+ /*
+ * Default hardware capabilities. enic_dev_init() may add additional
+ * flags if it enables overlay offloads.
+ */
+ enic->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ enic->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ enic->tx_offload_mask =
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG;
+
return 0;
}
#include <rte_ip.h>
#include <rte_tcp.h>
-#define ENIC_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
-
#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+ * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
+ * afterwards.
+ *
+ * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+ * RTE_PTYPE_TUNNEL_GRENAT.
+ */
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
- [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags];
+ return cq_type_table[cqrd_flags + tnl];
}
static inline void
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
- if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ /*
+ * When overlay offload is enabled, the NIC may
+ * set ipv4_csum_ok=1 if the inner packet is IPv6.
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
struct vnic_cq *cq;
volatile struct cq_desc *cqd_ptr;
uint8_t color;
+ uint8_t tnl;
uint16_t seg_length;
struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
continue;
}
+ /*
+ * When overlay offload is enabled, CQ.fcoe indicates the
+ * packet is tunnelled.
+ */
+ tnl = enic->overlay_offload &&
+ (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
/* cq rx flags are only valid if eop bit is set */
- first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+ first_seg->packet_type =
+ enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
enic_cq_rx_to_pkt_flags(&cqd, first_seg);
-
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
if (unlikely(packet_error)) {
rte_pktmbuf_free(first_seg);
rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
return 0;
}
-uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
int32_t ret;
uint16_t i;
uint64_t ol_flags;
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
+ if (ol_flags & wq->tx_offload_notsup_mask) {
rte_errno = -ENOTSUP;
return i;
}
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
+ /* For tunnel, need the size of outer+inner headers */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ header_len += tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ }
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {