drivers/net: remove redundant new line from logs
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 58aec9f..608685f 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,8 @@
 #include <rte_tcp.h>
 #include <rte_sctp.h>
 #include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
 
 #include "i40e_logs.h"
 #include "base/i40e_prototype.h"
 #define I40E_TX_CKSUM_OFFLOAD_MASK (            \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
+               PKT_TX_TCP_SEG |                 \
                PKT_TX_OUTER_IP_CKSUM)
 
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
-       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+#define I40E_TX_OFFLOAD_MASK (  \
+               PKT_TX_IP_CKSUM |       \
+               PKT_TX_L4_MASK |        \
+               PKT_TX_OUTER_IP_CKSUM | \
+               PKT_TX_TCP_SEG |        \
+               PKT_TX_QINQ_PKT |       \
+               PKT_TX_VLAN_PKT)
 
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       ((uint64_t)((mb)->buf_physaddr + (mb)->data_off))
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+               (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
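
The I40E_TX_OFFLOAD_NOTSUP_MASK above is built by XOR-ing the full ethdev
offload set against the flags this driver supports, so exactly the
unsupported flags remain set. A minimal sketch of the resulting check,
assuming the surrounding definitions in this file (reject() is hypothetical):

    /* Any bit set in ol_flags but absent from I40E_TX_OFFLOAD_MASK
     * survives the AND and marks the packet as unsupported.
     */
    if (m->ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK)
            reject(m);
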
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-                          const char *ring_name,
-                          uint16_t queue_id,
-                          uint32_t ring_size,
-                          int socket_id);
 static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
                                      struct rte_mbuf **tx_pkts,
                                      uint16_t nb_pkts);
@@ -99,7 +101,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 {
        if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
                (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
-               mb->ol_flags |= PKT_RX_VLAN_PKT;
+               mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
                mb->vlan_tci =
                        rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u",
@@ -110,7 +112,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
        if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
                (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
-               mb->ol_flags |= PKT_RX_QINQ_PKT;
+               mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
                mb->vlan_tci_outer = mb->vlan_tci;
                mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
                PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@@ -149,29 +151,23 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
        uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
 
 #define I40E_RX_ERR_BITS 0x3f
-       if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
-               return flags;
-       /* If RXE bit set, all other status bits are meaningless */
-       if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
-               flags |= PKT_RX_MAC_ERR;
+       if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+               flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
                return flags;
        }
 
-       /* If RECIPE bit set, all other status indications should be ignored */
-       if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) {
-               flags |= PKT_RX_RECIP_ERR;
-               return flags;
-       }
-       if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT)))
-               flags |= PKT_RX_HBUF_OVERFLOW;
        if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
                flags |= PKT_RX_IP_CKSUM_BAD;
+       else
+               flags |= PKT_RX_IP_CKSUM_GOOD;
+
        if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
                flags |= PKT_RX_L4_CKSUM_BAD;
+       else
+               flags |= PKT_RX_L4_CKSUM_GOOD;
+
        if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
                flags |= PKT_RX_EIP_CKSUM_BAD;
-       if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT)))
-               flags |= PKT_RX_OVERSIZE;
 
        return flags;
 }
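
With the explicit *_GOOD flags added here, checksum status becomes
three-valued: verified good, verified bad, or not verified at all (for
example on packet types the hardware does not parse). A sketch of the
distinction an application can now draw; drop(), accept() and
verify_in_sw() are hypothetical:

    if (m->ol_flags & PKT_RX_IP_CKSUM_BAD)
            drop(m);              /* hardware checked, bad   */
    else if (m->ol_flags & PKT_RX_IP_CKSUM_GOOD)
            accept(m);            /* hardware checked, good  */
    else
            verify_in_sw(m);      /* hardware did not check  */
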
@@ -200,539 +196,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
 }
 #endif
 
-/* For each value it means, datasheet of hardware can tell more details */
-static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
-{
-       static const uint32_t ptype_table[UINT8_MAX] __rte_cache_aligned = {
-               /* L2 types */
-               /* [0] reserved */
-               [1] = RTE_PTYPE_L2_ETHER,
-               [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
-               /* [3] - [5] reserved */
-               [6] = RTE_PTYPE_L2_ETHER_LLDP,
-               /* [7] - [10] reserved */
-               [11] = RTE_PTYPE_L2_ETHER_ARP,
-               /* [12] - [21] reserved */
-
-               /* Non tunneled IPv4 */
-               [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_FRAG,
-               [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_NONFRAG,
-               [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_UDP,
-               /* [25] reserved */
-               [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_TCP,
-               [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_SCTP,
-               [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_ICMP,
-
-               /* IPv4 --> IPv4 */
-               [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [32] reserved */
-               [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> IPv6 */
-               [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [39] reserved */
-               [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN */
-               [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
-               [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [47] reserved */
-               [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
-               [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [54] reserved */
-               [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
-               [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-               [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [62] reserved */
-               [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-               [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [69] reserved */
-               [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-               [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-               [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [77] reserved */
-               [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-               [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [84] reserved */
-               [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* Non tunneled IPv6 */
-               [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_FRAG,
-               [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_NONFRAG,
-               [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_UDP,
-               /* [91] reserved */
-               [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_TCP,
-               [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_SCTP,
-               [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_L4_ICMP,
-
-               /* IPv6 --> IPv4 */
-               [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [98] reserved */
-               [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> IPv6 */
-               [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [105] reserved */
-               [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_IP |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN */
-               [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
-               [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [113] reserved */
-               [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
-               [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [120] reserved */
-               [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
-               [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
-               [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [128] reserved */
-               [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
-               [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [135] reserved */
-               [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
-               [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
-               [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [143] reserved */
-               [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
-               [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_FRAG,
-               [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_NONFRAG,
-               [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_UDP,
-               /* [150] reserved */
-               [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_TCP,
-               [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_SCTP,
-               [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_TUNNEL_GRENAT |
-                       RTE_PTYPE_INNER_L2_ETHER_VLAN |
-                       RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                       RTE_PTYPE_INNER_L4_ICMP,
-
-               /* All others reserved */
-       };
-
-       return ptype_table[ptype];
-}
-
 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
@@ -775,33 +238,65 @@ i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
 #endif
        return flags;
 }
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+                           union i40e_tx_offload tx_offload,
+                           uint32_t *cd_tunneling)
+{
+       /* EIPT: External (outer) IP header type */
+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+       else if (ol_flags & PKT_TX_OUTER_IPV4)
+               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+       else if (ol_flags & PKT_TX_OUTER_IPV6)
+               *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+       /* EIPLEN: External (outer) IP header length, in DWords */
+       *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+               I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+       /* L4TUNT: L4 Tunneling Type */
+       switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+       case PKT_TX_TUNNEL_IPIP:
+               /* for non UDP / GRE tunneling, set to 00b */
+               break;
+       case PKT_TX_TUNNEL_VXLAN:
+       case PKT_TX_TUNNEL_GENEVE:
+               *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+               break;
+       case PKT_TX_TUNNEL_GRE:
+               *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+               break;
+       default:
+               PMD_TX_LOG(ERR, "Tunnel type not supported");
+               return;
+       }
+
+       /* L4TUNLEN: L4 Tunneling Length, in Words
+        *
+        * We depend on the application to set rte_mbuf.l2_len correctly.
+        * For IP in GRE it should be set to the length of the GRE
+        * header;
+        * for MAC in GRE or MAC in UDP it should be set to the length
+        * of the GRE or UDP headers plus the inner MAC up to and
+        * including its last Ethertype.
+        */
+       *cd_tunneling |= (tx_offload.l2_len >> 1) <<
+               I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
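
i40e_parse_tunneling_params() trusts the mbuf header-length fields, so the
application must fill them as the L4TUNLEN comment describes. A sketch for a
VXLAN-encapsulated TCP packet, assuming untagged Ethernet and option-less
IPv4 headers and the header structs of this DPDK era (ether_hdr, vxlan_hdr,
ipv4_hdr, udp_hdr, tcp_hdr):

    m->outer_l2_len = sizeof(struct ether_hdr);   /* outer MAC          */
    m->outer_l3_len = sizeof(struct ipv4_hdr);    /* outer IP           */
    m->l2_len = sizeof(struct udp_hdr) +          /* UDP + VXLAN + the  */
                sizeof(struct vxlan_hdr) +        /* inner MAC, per the */
                sizeof(struct ether_hdr);         /* L4TUNLEN comment   */
    m->l3_len = sizeof(struct ipv4_hdr);
    m->l4_len = sizeof(struct tcp_hdr);
    m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
                   PKT_TX_TUNNEL_VXLAN | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
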
+
 static inline void
 i40e_txd_enable_checksum(uint64_t ol_flags,
                        uint32_t *td_cmd,
                        uint32_t *td_offset,
-                       union i40e_tx_offload tx_offload,
-                       uint32_t *cd_tunneling)
+                       union i40e_tx_offload tx_offload)
 {
-       /* UDP tunneling packet TX checksum offload */
-       if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-
+       /* Set MACLEN */
+       if (ol_flags & PKT_TX_TUNNEL_MASK)
                *td_offset |= (tx_offload.outer_l2_len >> 1)
                                << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-               if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-               else if (ol_flags & PKT_TX_OUTER_IPV4)
-                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-               else if (ol_flags & PKT_TX_OUTER_IPV6)
-                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-
-               /* Now set the ctx descriptor fields */
-               *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
-                               I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                               (tx_offload.l2_len >> 1) <<
-                               I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
-       } else
+       else
                *td_offset |= (tx_offload.l2_len >> 1)
                        << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
@@ -849,17 +344,6 @@ i40e_txd_enable_checksum(uint64_t ol_flags,
        }
 }
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-       struct rte_mbuf *m;
-
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, 0);
-
-       return m;
-}
-
 /* Construct the tx flags */
 static inline uint64_t
 i40e_build_ctob(uint32_t td_cmd,
@@ -941,15 +425,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
                             "rxq->rx_free_thresh=%d",
                             rxq->nb_rx_desc, rxq->rx_free_thresh);
                ret = -EINVAL;
-       } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
-                               RTE_PMD_I40E_RX_MAX_BURST))) {
-               PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
-                            "rxq->nb_rx_desc=%d, "
-                            "I40E_MAX_RING_DESC=%d, "
-                            "RTE_PMD_I40E_RX_MAX_BURST=%d",
-                            rxq->nb_rx_desc, I40E_MAX_RING_DESC,
-                            RTE_PMD_I40E_RX_MAX_BURST);
-               ret = -EINVAL;
        }
 #else
        ret = -EINVAL;
@@ -1001,6 +476,8 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
                                        I40E_RXD_QW1_STATUS_SHIFT;
                }
 
+               rte_smp_rmb();
+
                /* Compute how many status bits were set */
                for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
                        nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
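
The rte_smp_rmb() added above keeps the descriptor field loads that follow
from being reordered ahead of the DD status loads on weakly ordered CPUs; on
x86 it reduces to a compiler barrier. The generic pattern it enforces, in a
hypothetical sketch (desc, DD_BIT and the fields are illustrative):

    while (!(desc->status & DD_BIT))   /* 1: poll the status word   */
            ;
    rte_smp_rmb();                     /* order load 1 before 2     */
    len = desc->length;                /* 2: read the payload field */
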
@@ -1104,14 +581,14 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
                mb->nb_segs = 1;
                mb->port = rxq->port_id;
                dma_addr = rte_cpu_to_le_64(\
-                       RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
+                       rte_mbuf_data_dma_addr_default(mb));
                rxdp[i].read.hdr_addr = 0;
                rxdp[i].read.pkt_addr = dma_addr;
        }
 
        /* Update rx tail register */
        rte_wmb();
-       I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+       I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
 
        rxq->rx_free_trigger =
                (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -1187,6 +664,14 @@ i40e_recv_pkts_bulk_alloc(void *rx_queue,
 
        return nb_rx;
 }
+#else
+static uint16_t
+i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+                         struct rte_mbuf __rte_unused **rx_pkts,
+                         uint16_t __rte_unused nb_pkts)
+{
+       return 0;
+}
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 
 uint16_t
@@ -1225,7 +710,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
-               nmb = rte_rxmbuf_alloc(rxq->mp);
+               nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb))
                        break;
                rxd = *rxdp;
@@ -1251,7 +736,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
                rxdp->read.hdr_addr = 0;
                rxdp->read.pkt_addr = dma_addr;
 
@@ -1336,7 +821,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
                if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
-               nmb = rte_rxmbuf_alloc(rxq->mp);
+               nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb))
                        break;
                rxd = *rxdp;
@@ -1362,7 +847,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
                rxm = rxe->mbuf;
                rxe->mbuf = nmb;
                dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
 
                /* Set data buffer address and data length of the mbuf */
                rxdp->read.hdr_addr = 0;
@@ -1435,10 +920,10 @@ i40e_recv_scattered_pkts(void *rx_queue,
                        i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
                        I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
                if (pkt_flags & PKT_RX_RSS_HASH)
-                       rxm->hash.rss =
+                       first_seg->hash.rss =
                                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
                if (pkt_flags & PKT_RX_FDIR)
-                       pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+                       pkt_flags |= i40e_rxd_build_fdir(&rxd, first_seg);
 
 #ifdef RTE_LIBRTE_IEEE1588
                pkt_flags |= i40e_get_iee15888_flags(first_seg, qword1);
@@ -1483,13 +968,14 @@ i40e_calc_context_desc(uint64_t flags)
 {
        static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
                PKT_TX_TCP_SEG |
-               PKT_TX_QINQ_PKT;
+               PKT_TX_QINQ_PKT |
+               PKT_TX_TUNNEL_MASK;
 
 #ifdef RTE_LIBRTE_IEEE1588
        mask |= PKT_TX_IEEE1588_TMST;
 #endif
 
-       return ((flags & mask) ? 1 : 0);
+       return (flags & mask) ? 1 : 0;
 }
 
 /* set i40e TSO context descriptor */
@@ -1505,7 +991,7 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
        }
 
        /**
-        * in case of tunneling packet, the outer_l2_len and
+        * in case of a non-tunneled packet, the outer_l2_len and
         * outer_l3_len must be 0.
         */
        hdr_len = tx_offload.outer_l2_len +
@@ -1622,12 +1108,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                /* Always enable CRC offload insertion */
                td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
-               /* Enable checksum offloading */
+               /* Fill in tunneling parameters if necessary */
                cd_tunneling_params = 0;
-               if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
-                       i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
-                               tx_offload, &cd_tunneling_params);
-               }
+               if (ol_flags & PKT_TX_TUNNEL_MASK)
+                       i40e_parse_tunneling_params(ol_flags, tx_offload,
+                                                   &cd_tunneling_params);
+               /* Enable checksum offloading */
+               if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
+                       i40e_txd_enable_checksum(ol_flags, &td_cmd,
+                                                &td_offset, tx_offload);
 
                if (nb_ctx) {
                        /* Setup TX context descriptor if required */
@@ -1697,7 +1186,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        /* Setup TX Descriptor */
                        slen = m_seg->data_len;
-                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
 
                        PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
                                "buf_dma_addr: %#"PRIx64";\n"
@@ -1746,7 +1235,7 @@ end_of_tx:
                   (unsigned) txq->port_id, (unsigned) txq->queue_id,
                   (unsigned) tx_id, (unsigned) nb_tx);
 
-       I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+       I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
        txq->tx_tail = tx_id;
 
        return nb_tx;
@@ -1768,7 +1257,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
        for (i = 0; i < txq->tx_rs_thresh; i++)
                rte_prefetch0((txep + i)->mbuf);
 
-       if (!(txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT)) {
+       if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
                for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
                        rte_mempool_put(txep->mbuf->pool, txep->mbuf);
                        txep->mbuf = NULL;
@@ -1796,7 +1285,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
        uint32_t i;
 
        for (i = 0; i < 4; i++, txdp++, pkts++) {
-               dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+               dma_addr = rte_mbuf_data_dma_addr(*pkts);
                txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
                txdp->cmd_type_offset_bsz =
                        i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1810,7 +1299,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
 {
        uint64_t dma_addr;
 
-       dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
+       dma_addr = rte_mbuf_data_dma_addr(*pkts);
        txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
        txdp->cmd_type_offset_bsz =
                i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1898,7 +1387,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
 
        /* Update the tx tail register */
        rte_wmb();
-       I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+       I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
 
        return nb_pkts;
 }
@@ -1929,6 +1418,63 @@ i40e_xmit_pkts_simple(void *tx_queue,
        return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
+{
+       int i, ret;
+       uint64_t ol_flags;
+       struct rte_mbuf *m;
+
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+               ol_flags = m->ol_flags;
+
+               /**
+                * m->nb_segs is uint8_t, so nb_segs is always less than
+                * I40E_TX_MAX_SEG; only the nb_segs > I40E_TX_MAX_MTU_SEG
+                * condition needs to be checked.
+                */
+               if (!(ol_flags & PKT_TX_TCP_SEG)) {
+                       if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+                               rte_errno = -EINVAL;
+                               return i;
+                       }
+               } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+                               (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+                       /* An MSS outside the range (256B - 9674B) is
+                        * considered malicious
+                        */
+                       rte_errno = -EINVAL;
+                       return i;
+               }
+
+               if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = -ENOTSUP;
+                       return i;
+               }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = ret;
+                       return i;
+               }
+#endif
+               ret = rte_net_intel_cksum_prepare(m);
+               if (ret != 0) {
+                       rte_errno = ret;
+                       return i;
+               }
+       }
+       return i;
+}
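
i40e_prep_pkts() is the driver hook behind the ethdev tx_prepare API it was
added alongside; calling it before the burst turns offload violations into an
rte_errno report instead of, say, a malicious-driver event for a bad TSO MSS.
A usage sketch (port_id, queue_id, pkts and n are illustrative; note this
code stores negative errno values):

    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
    if (nb_prep < n)
            printf("packet %u rejected, errno %d\n", nb_prep, rte_errno);
    uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
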
+
 /*
  * Find the VSI the queue belongs to. 'queue_idx' is the queue index
  * the application uses, which is assumed to be sequential. But from driver's
@@ -2006,7 +1552,8 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
                        i40e_rx_queue_release_mbufs(rxq);
                        i40e_reset_rx_queue(rxq);
-               }
+               } else
+                       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }
 
        return err;
@@ -2035,6 +1582,7 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                }
                i40e_rx_queue_release_mbufs(rxq);
                i40e_reset_rx_queue(rxq);
+               dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
 
        return 0;
@@ -2060,6 +1608,8 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                if (err)
                        PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
                                    tx_queue_id);
+               else
+                       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
        }
 
        return err;
@@ -2089,11 +1639,55 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
                i40e_tx_queue_release_mbufs(txq);
                i40e_reset_tx_queue(txq);
+               dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
 
        return 0;
 }
 
+const uint32_t *
+i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               /* refers to i40e_rxd_pkt_type_mapping() */
+               RTE_PTYPE_L2_ETHER,
+               RTE_PTYPE_L2_ETHER_TIMESYNC,
+               RTE_PTYPE_L2_ETHER_LLDP,
+               RTE_PTYPE_L2_ETHER_ARP,
+               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_FRAG,
+               RTE_PTYPE_L4_ICMP,
+               RTE_PTYPE_L4_NONFRAG,
+               RTE_PTYPE_L4_SCTP,
+               RTE_PTYPE_L4_TCP,
+               RTE_PTYPE_L4_UDP,
+               RTE_PTYPE_TUNNEL_GRENAT,
+               RTE_PTYPE_TUNNEL_IP,
+               RTE_PTYPE_INNER_L2_ETHER,
+               RTE_PTYPE_INNER_L2_ETHER_VLAN,
+               RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_INNER_L4_FRAG,
+               RTE_PTYPE_INNER_L4_ICMP,
+               RTE_PTYPE_INNER_L4_NONFRAG,
+               RTE_PTYPE_INNER_L4_SCTP,
+               RTE_PTYPE_INNER_L4_TCP,
+               RTE_PTYPE_INNER_L4_UDP,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == i40e_recv_pkts ||
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+           dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+#endif
+           dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+           dev->rx_pkt_burst == i40e_recv_pkts_vec)
+               return ptypes;
+       return NULL;
+}
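
This table backs the generic ptype query, which lets an application ask what
the currently selected RX burst function can classify. A sketch using the
ethdev API this callback serves:

    uint32_t ptypes[32];
    int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
                                             ptypes, RTE_DIM(ptypes));
    for (int i = 0; i < n && i < (int)RTE_DIM(ptypes); i++)
            printf("supported ptype: %#x\n", ptypes[i]);
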
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
@@ -2114,7 +1708,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t base, bsf, tc_mapping;
        int use_def_burst_func = 1;
 
-       if (hw->mac.type == I40E_MAC_VF) {
+       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
                struct i40e_vf *vf =
                        I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
                vsi = &vf->vsi;
@@ -2148,13 +1742,13 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "rx queue data structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       if (hw->mac.type == I40E_MAC_VF)
+       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
                rxq->reg_idx = queue_idx;
        else /* PF device */
                rxq->reg_idx = vsi->base_queue +
@@ -2168,28 +1762,31 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
 
        /* Allocate the maximum number of RX ring hardware descriptors. */
-       ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
-       ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-       rz = i40e_ring_dma_zone_reserve(dev,
-                                       "rx_ring",
-                                       queue_idx,
-                                       ring_size,
-                                       socket_id);
+       len = I40E_MAX_RING_DESC;
+
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+       /**
+        * Allocating a little more memory because vectorized/bulk_alloc Rx
+        * functions don't check boundaries each time.
+        */
+       len += RTE_PMD_I40E_RX_MAX_BURST;
+#endif
+
+       ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+                             I40E_DMA_MEM_ALIGN);
+
+       rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
+                             ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!rz) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Zero all the descriptors in the ring. */
        memset(rz->addr, 0, ring_size);
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-       rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
-
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
@@ -2207,7 +1804,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (!rxq->sw_ring) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_rx_queue(rxq);
@@ -2339,7 +1936,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t tx_rs_thresh, tx_free_thresh;
        uint16_t i, base, bsf, tc_mapping;
 
-       if (hw->mac.type == I40E_MAC_VF) {
+       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
                struct i40e_vf *vf =
                        I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
                vsi = &vf->vsi;
@@ -2394,8 +1991,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return I40E_ERR_PARAM;
        }
        if (tx_free_thresh >= (nb_desc - 3)) {
-               PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
-                            "tx_free_thresh must be less than the "
+               PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
                             "number of TX descriptors minus 3. "
                             "(tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
@@ -2446,21 +2042,18 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "tx queue structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Allocate TX hardware ring descriptors. */
        ring_size = sizeof(struct i40e_tx_desc) * I40E_MAX_RING_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
-       tz = i40e_ring_dma_zone_reserve(dev,
-                                       "tx_ring",
-                                       queue_idx,
-                                       ring_size,
-                                       socket_id);
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                             ring_size, I40E_RING_BASE_ALIGN, socket_id);
        if (!tz) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
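
rte_eth_dma_zone_reserve() is the generic ethdev replacement for the driver-local helper deleted further down: it derives a canonical memzone name from the driver, port and queue, returns the existing zone when a queue is set up again, and otherwise reserves an aligned one. Roughly equivalent logic, simplified (the real helper also keeps the Xen Dom0 bounded reservation; dev, queue_idx, ring_size and socket_id are assumed from the surrounding function):

	/* Simplified sketch of rte_eth_dma_zone_reserve(), not the library
	 * source: look up first so a queue re-setup reuses the same DMA
	 * memory, then fall back to a fresh aligned reservation.
	 */
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.name, "tx_ring",
		 dev->data->port_id, queue_idx);
	mz = rte_memzone_lookup(z_name);
	if (mz == NULL)
		mz = rte_memzone_reserve_aligned(z_name, ring_size,
						 socket_id, 0,
						 I40E_RING_BASE_ALIGN);
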
 
        txq->nb_tx_desc = nb_desc;
@@ -2470,7 +2063,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
-       if (hw->mac.type == I40E_MAC_VF)
+       if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF)
                txq->reg_idx = queue_idx;
        else /* PF device */
                txq->reg_idx = vsi->base_queue +
@@ -2481,11 +2074,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->vsi = vsi;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-       txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
 
        /* Allocate software ring */
@@ -2497,7 +2086,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (!txq->sw_ring) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_tx_queue(txq);
@@ -2538,47 +2127,21 @@ i40e_dev_tx_queue_release(void *txq)
        rte_free(q);
 }
 
-static const struct rte_memzone *
-i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,
-                          const char *ring_name,
-                          uint16_t queue_id,
-                          uint32_t ring_size,
-                          int socket_id)
-{
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
-
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                               dev->data->port_id, queue_id);
-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
-       return rte_memzone_reserve_bounded(z_name, ring_size,
-               socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
-#else
-       return rte_memzone_reserve_aligned(z_name, ring_size,
-                               socket_id, 0, I40E_RING_BASE_ALIGN);
-#endif
-}
-
 const struct rte_memzone *
 i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
 {
-       const struct rte_memzone *mz = NULL;
+       const struct rte_memzone *mz;
 
        mz = rte_memzone_lookup(name);
        if (mz)
                return mz;
-#ifdef RTE_LIBRTE_XEN_DOM0
-       mz = rte_memzone_reserve_bounded(name, len,
-               socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
-#else
-       mz = rte_memzone_reserve_aligned(name, len,
+
+       if (rte_xen_dom0_supported())
+               mz = rte_memzone_reserve_bounded(name, len,
+                               socket_id, 0, I40E_RING_BASE_ALIGN, RTE_PGSIZE_2M);
+       else
+               mz = rte_memzone_reserve_aligned(name, len,
                                socket_id, 0, I40E_RING_BASE_ALIGN);
-#endif
        return mz;
 }
 
@@ -2593,8 +2156,8 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq)
                return;
        }
 
-       if (!rxq || !rxq->sw_ring) {
-               PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+       if (!rxq->sw_ring) {
+               PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL");
                return;
        }
 
@@ -2769,7 +2332,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
 
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                volatile union i40e_rx_desc *rxd;
-               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp);
+               struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp);
 
                if (unlikely(!mbuf)) {
                        PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
@@ -2783,7 +2346,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
                mbuf->port = rxq->port_id;
 
                dma_addr =
-                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+                       rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
 
                rxd = &rxq->rx_ring[i];
                rxd->read.pkt_addr = dma_addr;
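
rte_mbuf_raw_alloc() is the exported replacement for the old per-driver rte_rxmbuf_alloc(); unlike rte_pktmbuf_alloc() it does not reset the packet fields, which is why the loop above sets next, nb_segs, data_off and port by hand, and rte_mbuf_data_dma_addr_default() resolves to the buffer's bus address plus the default headroom. A short sketch of the contract, assuming a configured mempool mp:

	/* Sketch: raw allocation returns an mbuf whose packet fields are
	 * not reset, so the caller must set every field it relies on
	 * before handing the buffer to hardware.
	 */
	struct rte_mbuf *m = rte_mbuf_raw_alloc(mp);

	if (m != NULL) {
		m->data_off = RTE_PKTMBUF_HEADROOM;
		m->next = NULL;
		m->nb_segs = 1;
		/* where HW writes: buf_physaddr + RTE_PKTMBUF_HEADROOM */
		uint64_t dma = rte_mbuf_data_dma_addr_default(m);
		(void)dma;
	}
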
@@ -2899,7 +2462,12 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0;
        rx_ctx.l2tsel = 1;
-       rx_ctx.showiv = 1;
+       /* showiv indicates whether the inner VLAN of a tunnel packet is
+        * stripped. When set to 1, the VLAN information is stripped from
+        * the inner header, but the hardware does not put it in the
+        * descriptor, so set it to zero by default.
+        */
+       rx_ctx.showiv = 0;
        rx_ctx.prefena = 1;
 
        err = i40e_clear_lan_rx_queue_context(hw, pf_q);
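
With showiv cleared, the inner tag of a tunneled packet stays in the packet data; only tags stripped by the regular port VLAN configuration are reported through the mbuf. A hedged sketch of how an application consumes them after rte_eth_rx_burst(), using this DPDK generation's flag names:

	/* Sketch: reading stripped VLAN tags from a received mbuf m. */
	if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
		printf("stripped tag: %u\n", m->vlan_tci);
	if (m->ol_flags & PKT_RX_QINQ_STRIPPED)
		printf("stripped outer tag: %u\n", m->vlan_tci_outer);
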
@@ -2937,11 +2505,15 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (!dev->data->tx_queues[i])
+                       continue;
                i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
                i40e_reset_tx_queue(dev->data->tx_queues[i]);
        }
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (!dev->data->rx_queues[i])
+                       continue;
                i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
                i40e_reset_rx_queue(dev->data->rx_queues[i]);
        }
@@ -2955,12 +2527,16 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               if (!dev->data->rx_queues[i])
+                       continue;
                i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (!dev->data->tx_queues[i])
+                       continue;
                i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
@@ -2976,13 +2552,15 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        struct i40e_tx_queue *txq;
        const struct rte_memzone *tz = NULL;
        uint32_t ring_size;
-       struct rte_eth_dev *dev = pf->adapter->eth_dev;
+       struct rte_eth_dev *dev;
 
        if (!pf) {
                PMD_DRV_LOG(ERR, "PF is not available");
                return I40E_ERR_BAD_PTR;
        }
 
+       dev = pf->adapter->eth_dev;
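
The hunk above also fixes an ordering bug: the old code dereferenced pf in the initializer, before the NULL check could run, so the check never protected anything. Schematically, the anti-pattern being removed:

	/* pf is dereferenced before it is checked, so a NULL pf
	 * crashes on the first line.
	 */
	struct rte_eth_dev *dev = pf->adapter->eth_dev;	/* may fault */
	if (!pf)
		return I40E_ERR_BAD_PTR;		/* too late */
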
+
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("i40e fdir tx queue",
                                  sizeof(struct i40e_tx_queue),
@@ -2998,11 +2576,9 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        ring_size = sizeof(struct i40e_tx_desc) * I40E_FDIR_NUM_TX_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-       tz = i40e_ring_dma_zone_reserve(dev,
-                                       "fdir_tx_ring",
-                                       I40E_FDIR_QUEUE_ID,
-                                       ring_size,
-                                       SOCKET_ID_ANY);
+       tz = rte_eth_dma_zone_reserve(dev, "fdir_tx_ring",
+                                     I40E_FDIR_QUEUE_ID, ring_size,
+                                     I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!tz) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX.");
@@ -3014,11 +2590,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        txq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
-#else
-       txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
-#endif
        txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
        /*
         * don't need to allocate software ring and reset for the fdir
@@ -3036,13 +2608,15 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        struct i40e_rx_queue *rxq;
        const struct rte_memzone *rz = NULL;
        uint32_t ring_size;
-       struct rte_eth_dev *dev = pf->adapter->eth_dev;
+       struct rte_eth_dev *dev;
 
        if (!pf) {
                PMD_DRV_LOG(ERR, "PF is not available");
                return I40E_ERR_BAD_PTR;
        }
 
+       dev = pf->adapter->eth_dev;
+
        /* Allocate the RX queue data structure. */
        rxq = rte_zmalloc_socket("i40e fdir rx queue",
                                  sizeof(struct i40e_rx_queue),
@@ -3058,11 +2632,9 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        ring_size = sizeof(union i40e_rx_desc) * I40E_FDIR_NUM_RX_DESC;
        ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
 
-       rz = i40e_ring_dma_zone_reserve(dev,
-                                       "fdir_rx_ring",
-                                       I40E_FDIR_QUEUE_ID,
-                                       ring_size,
-                                       SOCKET_ID_ANY);
+       rz = rte_eth_dma_zone_reserve(dev, "fdir_rx_ring",
+                                     I40E_FDIR_QUEUE_ID, ring_size,
+                                     I40E_RING_BASE_ALIGN, SOCKET_ID_ANY);
        if (!rz) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX.");
@@ -3074,11 +2646,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
        rxq->vsi = pf->fdir.fdir_vsi;
 
-#ifdef RTE_LIBRTE_XEN_DOM0
        rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
-#else
-       rxq->rx_ring_phys_addr = (uint64_t)rz->phys_addr;
-#endif
        rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
 
        /*
@@ -3091,6 +2659,43 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        return I40E_SUCCESS;
 }
 
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_rxq_info *qinfo)
+{
+       struct i40e_rx_queue *rxq;
+
+       rxq = dev->data->rx_queues[queue_id];
+
+       qinfo->mp = rxq->mp;
+       qinfo->scattered_rx = dev->data->scattered_rx;
+       qinfo->nb_desc = rxq->nb_rx_desc;
+
+       qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+       qinfo->conf.rx_drop_en = rxq->drop_en;
+       qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+       struct rte_eth_txq_info *qinfo)
+{
+       struct i40e_tx_queue *txq;
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+
+       qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+       qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+       qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+       qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+       qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+       qinfo->conf.txq_flags = txq->txq_flags;
+       qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
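
These two getters back the generic rte_eth_rx_queue_info_get()/rte_eth_tx_queue_info_get() calls. A minimal application-side sketch (port and queue ids are illustrative; port_id is still uint8_t in this DPDK generation):

	#include <stdio.h>
	#include <rte_ethdev.h>

	static void
	dump_rxq(uint8_t port_id, uint16_t queue_id)
	{
		struct rte_eth_rxq_info qinfo;

		if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
			return;

		printf("rxq %u: %u descriptors, free_thresh %u%s\n",
		       queue_id, qinfo.nb_desc, qinfo.conf.rx_free_thresh,
		       qinfo.scattered_rx ? ", scattered" : "");
	}
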
+
 void __attribute__((cold))
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {
@@ -3114,7 +2719,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                struct i40e_rx_queue *rxq =
                                        dev->data->rx_queues[i];
 
-                               if (i40e_rxq_vec_setup(rxq)) {
+                               if (rxq && i40e_rxq_vec_setup(rxq)) {
                                        ad->rx_vec_allowed = false;
                                        break;
                                }
@@ -3176,7 +2781,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
 
-                       rxq->rx_using_sse = rx_using_sse;
+                       if (rxq)
+                               rxq->rx_using_sse = rx_using_sse;
                }
        }
 }
@@ -3215,7 +2821,7 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
                                struct i40e_tx_queue *txq =
                                        dev->data->tx_queues[i];
 
-                               if (i40e_txq_vec_setup(txq)) {
+                               if (txq && i40e_txq_vec_setup(txq)) {
                                        ad->tx_vec_allowed = false;
                                        break;
                                }
@@ -3231,9 +2837,11 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
                        PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
                        dev->tx_pkt_burst = i40e_xmit_pkts_simple;
                }
+               dev->tx_pkt_prepare = NULL;
        } else {
                PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
                dev->tx_pkt_burst = i40e_xmit_pkts;
+               dev->tx_pkt_prepare = i40e_prep_pkts;
        }
 }
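
Wiring up tx_pkt_prepare gives applications a checked send path: rte_eth_tx_prepare() validates and, where possible, fixes offload metadata (for example TSO pseudo-header checksums) before rte_eth_tx_burst(), while a NULL hook, as on the simple path, makes the prepare call a no-op. A usage sketch (handle_bad_pkt() is a hypothetical application helper; port_id, queue_id, pkts and nb_pkts are assumed):

	/* Sketch: validate offloads, then transmit what passed. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts)
		/* pkts[nb_prep] failed validation; rte_errno says why */
		handle_bad_pkt(pkts[nb_prep], rte_errno);

	rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
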
 
@@ -3287,4 +2895,3 @@ i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
 {
        return 0;
 }
-