mbuf: add namespace to offload flags
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 7129e12..c44715b 100644
@@ -4,13 +4,14 @@
  */
 
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 #include <rte_prefetch.h>
 
 #include "enic_compat.h"
 #include "rq_enet_desc.h"
 #include "enic.h"
+#include "enic_rxtx_common.h"
 #include <rte_ether.h>
 #include <rte_ip.h>
 #include <rte_tcp.h>
 #define rte_packet_prefetch(p) do {} while (0)
 #endif
 
-static inline uint16_t
-enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
-{
-       return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
-}
-
-static inline uint16_t
-enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
-{
-       return le16_to_cpu(crd->bytes_written_flags) &
-                          ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_packet_error(uint16_t bwflags)
-{
-       return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
-               CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_eop(uint16_t ciflags)
-{
-       return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
-               == CQ_ENET_RQ_DESC_FLAGS_EOP;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
-{
-       return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
-               CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
-               CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-       return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
-               CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
-{
-       return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
-               CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
-}
-
-static inline uint8_t
-enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
-{
-       return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
-               CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
-}
-
-static inline uint32_t
-enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
-{
-       return le32_to_cpu(cqrd->rss_hash);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
-{
-       return le16_to_cpu(cqrd->vlan);
-}
-
-static inline uint16_t
-enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
-{
-       struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-       return le16_to_cpu(cqrd->bytes_written_flags) &
-               CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
-}
-
-
-static inline uint8_t
-enic_cq_rx_check_err(struct cq_desc *cqd)
-{
-       struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-       uint16_t bwflags;
-
-       bwflags = enic_cq_rx_desc_bwflags(cqrd);
-       if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
-               return 1;
-       return 0;
-}
-
-/* Lookup table to translate RX CQ flags to mbuf flags. */
-static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
-{
-       struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-       uint8_t cqrd_flags = cqrd->flags;
-       /*
-        * Odd-numbered entries are for tunnel packets. All packet type info
-        * applies to the inner packet, and there is no info on the outer
-        * packet. The outer flags in these entries exist only to avoid
-        * changing enic_cq_rx_to_pkt_flags(). They are cleared from mbuf
-        * afterwards.
-        *
-        * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
-        * RTE_PTYPE_TUNNEL_GRENAT.
-        */
-       static const uint32_t cq_type_table[128] __rte_cache_aligned = {
-               [0x00] = RTE_PTYPE_UNKNOWN,
-               [0x01] = RTE_PTYPE_UNKNOWN |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER,
-               [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-               [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_NONFRAG,
-               [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-               [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_UDP,
-               [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-               [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_TCP,
-               [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
-               [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_NONFRAG,
-               [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
-               [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_UDP,
-               [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
-               [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_TCP,
-               [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
-               [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
-                        RTE_PTYPE_TUNNEL_GRENAT |
-                        RTE_PTYPE_INNER_L2_ETHER |
-                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
-                        RTE_PTYPE_INNER_L4_FRAG,
-               /* All others reserved */
-       };
-       cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
-               | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
-               | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
-       return cq_type_table[cqrd_flags + tnl];
-}
-
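
The table above is indexed by the masked CQ flags plus the tunnel bit: even entries describe plain packets, odd entries the same header stack seen as an inner packet. A minimal stand-alone sketch of the index computation, assuming the FLAGS_* values below mirror the CQ_ENET_RQ_DESC_FLAGS_* definitions in the enic base headers:

#include <stdint.h>
#include <stdio.h>

#define FLAGS_IPV4_FRAGMENT 0x40 /* assumed values, mirroring cq_enet_desc.h */
#define FLAGS_IPV4          0x20
#define FLAGS_IPV6          0x10
#define FLAGS_TCP           0x04
#define FLAGS_UDP           0x02

int main(void)
{
	uint8_t cqrd_flags = FLAGS_IPV4 | FLAGS_UDP; /* IPv4/UDP completion */
	uint8_t tnl = 1;                             /* set for tunneled packets */

	/* Mask to the bits the table is defined over, then pick the
	 * odd (inner-packet) entry when tnl is set: 0x22 -> 0x23. */
	cqrd_flags &= FLAGS_IPV4_FRAGMENT | FLAGS_IPV4 | FLAGS_IPV6 |
		      FLAGS_TCP | FLAGS_UDP;
	printf("cq_type_table index: 0x%02x\n",
	       (unsigned int)(cqrd_flags + tnl));
	return 0;
}
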
-static inline void
-enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
-{
-       struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
-       uint16_t bwflags, pkt_flags = 0, vlan_tci;
-       bwflags = enic_cq_rx_desc_bwflags(cqrd);
-       vlan_tci = enic_cq_rx_desc_vlan(cqrd);
-
-       /* VLAN STRIPPED flag. The L2 packet type is updated here too. */
-       if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
-               pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
-               mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-       } else {
-               if (vlan_tci != 0)
-                       mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
-               else
-                       mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
-       }
-       mbuf->vlan_tci = vlan_tci;
-
-       if ((cqd->type_color & CQ_DESC_TYPE_MASK) == CQ_DESC_TYPE_CLASSIFIER) {
-               struct cq_enet_rq_clsf_desc *clsf_cqd;
-               uint16_t filter_id;
-               clsf_cqd = (struct cq_enet_rq_clsf_desc *)cqd;
-               filter_id = clsf_cqd->filter_id;
-               if (filter_id) {
-                       pkt_flags |= PKT_RX_FDIR;
-                       if (filter_id != ENIC_MAGIC_FILTER_ID) {
-                               mbuf->hash.fdir.hi = clsf_cqd->filter_id;
-                               pkt_flags |= PKT_RX_FDIR_ID;
-                       }
-               }
-       } else if (enic_cq_rx_desc_rss_type(cqrd)) {
-               /* RSS flag */
-               pkt_flags |= PKT_RX_RSS_HASH;
-               mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
-       }
-
-       /* checksum flags */
-       if (mbuf->packet_type & (RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV6)) {
-               if (!enic_cq_rx_desc_csum_not_calc(cqrd)) {
-                       uint32_t l4_flags;
-                       l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
-                       /*
-                        * When overlay offload is enabled, the NIC may
-                        * set ipv4_csum_ok=1 if the inner packet is IPv6.
-                        * So, explicitly check for IPv4 before checking
-                        * ipv4_csum_ok.
-                        */
-                       if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
-                               if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
-                                       pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
-                               else
-                                       pkt_flags |= PKT_RX_IP_CKSUM_BAD;
-                       }
-
-                       if (l4_flags == RTE_PTYPE_L4_UDP ||
-                           l4_flags == RTE_PTYPE_L4_TCP) {
-                               if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
-                                       pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
-                               else
-                                       pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-                       }
-               }
-       }
-
-       mbuf->ol_flags = pkt_flags;
-}
-
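
For reference, this is how a receive path might consume the ol_flags set by the function above, using the namespaced RTE_MBUF_F_RX_* names this series introduces (formerly PKT_RX_*); a sketch, not enic-specific:

#include <rte_mbuf.h>

static inline int
rx_csum_ok(const struct rte_mbuf *m)
{
	/* The IP and L4 checksum fields are small enums, so compare
	 * the whole masked field rather than testing single bits. */
	if ((m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_IP_CKSUM_BAD)
		return 0;
	if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
	    RTE_MBUF_F_RX_L4_CKSUM_BAD)
		return 0;
	return 1;
}
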
 /* Dummy receive function to replace the actual function so that
  * reconfiguration operations can be done safely.
  */
@@ -301,9 +42,9 @@ enic_dummy_recv_pkts(__rte_unused void *rx_queue,
        return 0;
 }
 
-uint16_t
-enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-              uint16_t nb_pkts)
+static inline uint16_t
+enic_recv_pkts_common(void *rx_queue, struct rte_mbuf **rx_pkts,
+                     uint16_t nb_pkts, const bool use_64b_desc)
 {
        struct vnic_rq *sop_rq = rx_queue;
        struct vnic_rq *data_rq;
@@ -321,10 +62,15 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t seg_length;
        struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
        struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+       const int desc_size = use_64b_desc ?
+               sizeof(struct cq_enet_rq_desc_64) :
+               sizeof(struct cq_enet_rq_desc);
+       RTE_BUILD_BUG_ON(sizeof(struct cq_enet_rq_desc_64) != 64);
 
        cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
        cq_idx = cq->to_clean;          /* index of cqd, rqd, mbuf_table */
-       cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+       cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
+                                    (uintptr_t)cq_idx * desc_size);
        color = cq->last_color;
 
        data_rq = &enic->rq[sop_rq->data_queue_idx];
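
Since 16B and 64B completions share one ring, the entry address above is computed with an explicit byte stride; typed pointer arithmetic on struct cq_desc would silently assume the 16B stride. A hypothetical stand-alone helper showing the same arithmetic:

#include <stdint.h>

static inline void *
cq_entry(void *descs, uint32_t idx, uint32_t desc_size)
{
	/* desc_size is 16 or 64 depending on the descriptor format */
	return (void *)((uintptr_t)descs + (uintptr_t)idx * desc_size);
}
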
@@ -337,15 +83,26 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                struct cq_desc cqd;
                uint8_t packet_error;
                uint16_t ciflags;
+               uint8_t tc;
 
                max_rx--;
 
+               tc = *(volatile uint8_t *)((uintptr_t)cqd_ptr + desc_size - 1);
                /* Check for pkts available */
-               if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+               if ((tc & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
                        break;
 
                /* Get the cq descriptor and extract rq info from it */
                cqd = *cqd_ptr;
+               /*
+                * The first 16B of the 64B descriptor are identical to
+                * the 16B descriptor, except for type_color. Copy
+                * type_color from the 64B descriptor into the 16B
+                * descriptor's field, so the code below can assume the
+                * 16B descriptor format.
+                */
+               if (use_64b_desc)
+                       cqd.type_color = tc;
                rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
                rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
 
@@ -368,7 +125,8 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                cq_idx++;
 
                /* Prefetch next mbuf & desc while processing current one */
-               cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+               cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
+                                            (uintptr_t)cq_idx * desc_size);
                rte_enic_prefetch(cqd_ptr);
 
                ciflags = enic_cq_rx_desc_ciflags(
@@ -474,6 +232,18 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }
 
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, true);
+}
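
Passing use_64b_desc as a const bool into an inlined worker is the usual way to get two specialized burst functions out of one body: the constant folds at each call site, so every exported wrapper compiles with the dead branch and the wrong-size stride removed. The pattern in miniature, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t
sum_common(const uint32_t *v, uint32_t n, const bool wide)
{
	/* 'wide' is a compile-time constant at both call sites below,
	 * so each wrapper specializes to a fixed stride. */
	const uint32_t stride = wide ? 4 : 1;
	uint32_t i, s = 0;

	for (i = 0; i < n; i += stride)
		s += v[i];
	return s;
}

uint32_t sum_narrow(const uint32_t *v, uint32_t n)
{
	return sum_common(v, n, false);
}

uint32_t sum_wide(const uint32_t *v, uint32_t n)
{
	return sum_common(v, n, true);
}
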
+
 uint16_t
 enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
@@ -588,7 +358,8 @@ enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return rx - rx_pkts;
 }
 
-static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+static inline void enic_free_wq_bufs(struct vnic_wq *wq,
+                                    uint16_t completed_index)
 {
        struct rte_mbuf *buf;
        struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
@@ -630,7 +401,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 
 unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 {
-       u16 completed_index;
+       uint16_t completed_index;
 
        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
 
@@ -652,11 +423,22 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
-               if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
-                       rte_errno = EINVAL;
-                       return i;
-               }
                ol_flags = m->ol_flags;
+               if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+                       if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+                               rte_errno = EINVAL;
+                               return i;
+                       }
+               } else {
+                       uint16_t header_len;
+
+                       header_len = m->l2_len + m->l3_len + m->l4_len;
+                       if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
+                               rte_errno = EINVAL;
+                               return i;
+                       }
+               }
+
                if (ol_flags & wq->tx_offload_notsup_mask) {
                        rte_errno = ENOTSUP;
                        return i;
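
For a TSO burst, pkt_len covers the entire burst, while the NIC emits frames of at most tso_segsz payload plus the replicated headers; tso_segsz + header_len is therefore the value to compare against the per-frame limit. A worked example with illustrative sizes:

#include <stdio.h>

int main(void)
{
	unsigned int hdrs = 14 + 20 + 20;  /* Ethernet + IPv4 + TCP */
	unsigned int tso_segsz = 8900;     /* payload per emitted frame */
	unsigned int pkt_len = 256 * 1024; /* whole TSO burst */

	/* Each wire frame is bounded by tso_segsz + hdrs, not by the
	 * burst length, which may legitimately exceed the per-frame
	 * hardware limit. */
	printf("largest wire frame: %u (burst: %u)\n",
	       tso_segsz + hdrs, pkt_len);
	return 0;
}
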
@@ -664,13 +446,13 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
 #endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
-                       rte_errno = ret;
+                       rte_errno = -ret;
                        return i;
                }
        }
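
The sign flip matters here: rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return 0 on success or a negative errno on failure, while rte_errno is defined to hold positive errno values. A minimal sketch of the convention, with a hypothetical checker:

#include <errno.h>
#include <rte_errno.h>

/* Hypothetical checker following the DPDK convention:
 * 0 on success, -errno on failure. */
static int check_pkt(int bad) { return bad ? -EINVAL : 0; }

static int
prep_one(int bad)
{
	int ret = check_pkt(bad);

	if (ret != 0) {
		rte_errno = -ret; /* store EINVAL, not -EINVAL */
		return -1;
	}
	return 0;
}
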
@@ -707,7 +489,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        wq_desc_avail = vnic_wq_desc_avail(wq);
        head_idx = wq->head_idx;
        desc_count = wq->ring.desc_count;
-       ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+       ol_flags_mask = RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK;
        tx_oversized = &enic->soft_stats.tx_oversized;
 
        nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);
@@ -718,7 +500,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                data_len = tx_pkt->data_len;
                ol_flags = tx_pkt->ol_flags;
                nb_segs = tx_pkt->nb_segs;
-               tso = ol_flags & PKT_TX_TCP_SEG;
+               tso = ol_flags & RTE_MBUF_F_TX_TCP_SEG;
 
                /* drop packet if it's too big to send */
                if (unlikely(!tso && pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
@@ -735,7 +517,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                mss = 0;
                vlan_id = tx_pkt->vlan_tci;
-               vlan_tag_insert = !!(ol_flags & PKT_TX_VLAN_PKT);
+               vlan_tag_insert = !!(ol_flags & RTE_MBUF_F_TX_VLAN);
                bus_addr = (dma_addr_t)
                           (tx_pkt->buf_iova + tx_pkt->data_off);
 
@@ -761,20 +543,20 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
                        mss = tx_pkt->tso_segsz;
                        /* For tunnel, need the size of outer+inner headers */
-                       if (ol_flags & PKT_TX_TUNNEL_MASK) {
+                       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
                                header_len += tx_pkt->outer_l2_len +
                                        tx_pkt->outer_l3_len;
                        }
                }
 
                if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
-                       if (ol_flags & PKT_TX_IP_CKSUM)
+                       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
                                mss |= ENIC_CALC_IP_CKSUM;
 
                        /* NIC uses just 1 bit for UDP and TCP */
-                       switch (ol_flags & PKT_TX_L4_MASK) {
-                       case PKT_TX_TCP_CKSUM:
-                       case PKT_TX_UDP_CKSUM:
+                       switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+                       case RTE_MBUF_F_TX_TCP_CKSUM:
+                       case RTE_MBUF_F_TX_UDP_CKSUM:
                                mss |= ENIC_CALC_TCP_UDP_CKSUM;
                                break;
                        }
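
The hunks in this function are the mechanical rename from the commit title: each PKT_TX_* mbuf flag gains the RTE_MBUF_F_ namespace (PKT_TX_VLAN_PKT becomes RTE_MBUF_F_TX_VLAN, PKT_TX_TCP_SEG becomes RTE_MBUF_F_TX_TCP_SEG, and so on). For context, a minimal sketch of how an application requests these offloads on an already-built IPv4/TCP mbuf; illustrative, not enic-specific:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
request_tx_csum(struct rte_mbuf *m)
{
	/* The driver reads l2_len/l3_len to locate the headers. */
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}
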
@@ -840,12 +622,33 @@ static void enqueue_simple_pkts(struct rte_mbuf **pkts,
                                struct enic *enic)
 {
        struct rte_mbuf *p;
+       uint16_t mss;
 
        while (n) {
                n--;
                p = *pkts++;
                desc->address = p->buf_iova + p->data_off;
                desc->length = p->pkt_len;
+               /* VLAN insert */
+               desc->vlan_tag = p->vlan_tci;
+               desc->header_length_flags &=
+                       ((1 << WQ_ENET_FLAGS_EOP_SHIFT) |
+                        (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT));
+               if (p->ol_flags & RTE_MBUF_F_TX_VLAN) {
+                       desc->header_length_flags |=
+                               1 << WQ_ENET_FLAGS_VLAN_TAG_INSERT_SHIFT;
+               }
+               /*
+                * Checksum offload. We use WQ_ENET_OFFLOAD_MODE_CSUM, which
+                * is 0, so no need to set offload_mode.
+                */
+               mss = 0;
+               if (p->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+                       mss |= ENIC_CALC_IP_CKSUM << WQ_ENET_MSS_SHIFT;
+               if (p->ol_flags & RTE_MBUF_F_TX_L4_MASK)
+                       mss |= ENIC_CALC_TCP_UDP_CKSUM << WQ_ENET_MSS_SHIFT;
+               desc->mss_loopback = mss;
+
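
With the block above, the simple TX path now honors per-packet VLAN insertion and checksum requests straight from the mbuf. How a sender would request the insertion (a sketch; the driver side is what this hunk adds):

#include <rte_mbuf.h>

static void
request_vlan_insert(struct rte_mbuf *m, uint16_t tci)
{
	/* The NIC inserts the tag from vlan_tci when the flag is set. */
	m->vlan_tci = tci;
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
}
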
                /*
                 * The app should not send oversized
                 * packets. tx_pkt_prepare includes a check as