net/mlx5: support Rx tunnel type identification
author Xueming Li <xuemingl@mellanox.com>
Mon, 23 Apr 2018 12:33:03 +0000 (20:33 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Fri, 27 Apr 2018 17:00:56 +0000 (18:00 +0100)
This patch introduces tunnel type identification based on flow rules.
If flows of multiple tunnel types are built on the same queue, no tunnel
type will be reported in the packet type. The user application can then
use bits in the flow mark as a tunnel type identifier.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_rxtx_vec_neon.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.h
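
On the receive side, an application can first look at mbuf->packet_type and only fall back to the flow mark when the PMD could not resolve the tunnel (i.e. the queue carries flows of several tunnel types). A minimal sketch, assuming the application encodes the tunnel kind in the low bits of its own MARK ids (the APP_MARK_* layout below is hypothetical):

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Hypothetical mark layout chosen by the application when creating rules:
 * bits [1:0] encode the tunnel kind (1 = VXLAN, 2 = GRE).
 */
#define APP_MARK_TUNNEL_MASK  0x3
#define APP_MARK_TUNNEL_VXLAN 0x1
#define APP_MARK_TUNNEL_GRE   0x2

static uint32_t
app_pkt_tunnel_type(const struct rte_mbuf *m)
{
	uint32_t tunnel = m->packet_type & RTE_PTYPE_TUNNEL_MASK;

	/* The PMD resolved the tunnel type: only one tunnel type is
	 * active on the receiving queue.
	 */
	if (tunnel)
		return tunnel;
	/* Mixed tunnel types on this queue: fall back to the flow mark. */
	if (m->ol_flags & PKT_RX_FDIR_ID) {
		switch (m->hash.fdir.hi & APP_MARK_TUNNEL_MASK) {
		case APP_MARK_TUNNEL_VXLAN:
			return RTE_PTYPE_TUNNEL_VXLAN;
		case APP_MARK_TUNNEL_GRE:
			return RTE_PTYPE_TUNNEL_GRE;
		}
	}
	return 0;
}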

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f1811c5..a886f7e 100644
@@ -227,6 +227,7 @@ struct rte_flow {
        struct rte_flow_action_rss rss_conf; /**< RSS configuration */
        uint16_t (*queues)[]; /**< Queues indexes to use. */
        uint8_t rss_key[40]; /**< copy of the RSS key. */
+       uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */
        struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
        struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
        struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
@@ -243,6 +244,11 @@ struct rte_flow {
        (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
        (type) == RTE_FLOW_ITEM_TYPE_GRE)
 
+const uint32_t flow_ptype[] = {
+       [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+       [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
+};
+
 #define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
 
 const uint32_t ptype_ext[] = {
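
For reference, PTYPE_IDX() keeps only bits 12-15 of the packet type, which is where the RTE_PTYPE_TUNNEL_XXX values live, so every tunnel type maps to a small index in [0, 15]; this is what sizes the tunnel_types[16] counter array added to mlx5_rxq_ctrl below. A minimal sketch, assuming the standard rte_mbuf_ptype.h encoding (RTE_PTYPE_TUNNEL_MASK == 0x0000f000):

#include <stdio.h>
#include <rte_mbuf_ptype.h>

#define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)

int main(void)
{
	/* With the standard encoding this prints "GRE=2 VXLAN=3". */
	printf("GRE=%u VXLAN=%u\n",
	       (unsigned int)PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE),
	       (unsigned int)PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN));
	return 0;
}
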
@@ -880,7 +886,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
                if (ret)
                        goto exit_item_not_supported;
                if (IS_TUNNEL(items->type)) {
-                       if (parser->inner) {
+                       if (parser->tunnel) {
                                rte_flow_error_set(error, ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   items,
@@ -889,6 +895,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
                                return -rte_errno;
                        }
                        parser->inner = IBV_FLOW_SPEC_INNER;
+                       parser->tunnel = flow_ptype[items->type];
                }
                if (parser->drop) {
                        parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
@@ -1197,6 +1204,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
        }
        /* Third step. Conversion parse, fill the specifications. */
        parser->inner = 0;
+       parser->tunnel = 0;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                struct mlx5_flow_data data = {
                        .dev = dev,
@@ -1684,6 +1692,7 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 
        id.vni[0] = 0;
        parser->inner = IBV_FLOW_SPEC_INNER;
+       parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)];
        if (spec) {
                if (!mask)
                        mask = default_mask;
@@ -1740,6 +1749,7 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused,
        unsigned int i;
 
        parser->inner = IBV_FLOW_SPEC_INNER;
+       parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
        /* Update encapsulation IP layer protocol. */
        for (i = 0; i != hash_rxq_init_n; ++i) {
                if (!parser->queue[i].ibv_attr)
@@ -1946,7 +1956,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
                                      parser->rss_conf.key_len,
                                      hash_fields,
                                      parser->rss_conf.queue,
-                                     parser->rss_conf.queue_num);
+                                     parser->rss_conf.queue_num,
+                                     parser->tunnel);
                if (flow->frxq[i].hrxq)
                        continue;
                flow->frxq[i].hrxq =
@@ -1955,7 +1966,8 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
                                      parser->rss_conf.key_len,
                                      hash_fields,
                                      parser->rss_conf.queue,
-                                     parser->rss_conf.queue_num);
+                                     parser->rss_conf.queue_num,
+                                     parser->tunnel);
                if (!flow->frxq[i].hrxq) {
                        return rte_flow_error_set(error, ENOMEM,
                                                  RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1966,6 +1978,48 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
        return 0;
 }
 
+/**
+ * RXQ update after flow rule creation.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to the flow rule.
+ */
+static void
+mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+       struct priv *priv = dev->data->dev_private;
+       unsigned int i;
+       unsigned int j;
+
+       if (!dev->data->dev_started)
+               return;
+       for (i = 0; i != flow->rss_conf.queue_num; ++i) {
+               struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+                                                [(*flow->queues)[i]];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+               uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+               rxq_data->mark |= flow->mark;
+               if (!tunnel)
+                       continue;
+               rxq_ctrl->tunnel_types[tunnel] += 1;
+               /* Clear tunnel type if more than one tunnel type is set. */
+               for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) {
+                       if (j == tunnel)
+                               continue;
+                       if (rxq_ctrl->tunnel_types[j] > 0) {
+                               rxq_data->tunnel = 0;
+                               break;
+                       }
+               }
+               if (j == RTE_DIM(rxq_ctrl->tunnel_types))
+                       rxq_data->tunnel = flow->tunnel;
+       }
+}
+
 /**
  * Complete flow rule creation.
  *
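
The rule mlx5_flow_create_update_rxqs() implements (and which the destroy path further down reverses) is: a Rx queue reports a tunnel packet type only while exactly one tunnel type has active flows on it; otherwise rxq->tunnel stays 0 and only the flow mark can tell tunnels apart. A standalone sketch of that rule, assuming a 16-entry per-type flow counter as in mlx5_rxq_ctrl and an index-to-ptype table supplied by the caller:

#include <stdint.h>

/* Recompute the tunnel type a queue should report from its per-type flow
 * counters: exactly one active type is reported, anything else clears it.
 */
static uint32_t
resolve_rxq_tunnel(const uint32_t tunnel_types[16],
		   const uint32_t idx_to_ptype[16])
{
	unsigned int i;
	unsigned int types = 0;
	unsigned int last = 0;

	for (i = 0; i < 16; ++i) {
		if (tunnel_types[i]) {
			types++;
			last = i;
		}
	}
	return types == 1 ? idx_to_ptype[last] : 0;
}
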
@@ -2026,12 +2080,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
                                   NULL, "internal error in flow creation");
                goto error;
        }
-       for (i = 0; i != parser->rss_conf.queue_num; ++i) {
-               struct mlx5_rxq_data *q =
-                       (*priv->rxqs)[parser->rss_conf.queue[i]];
-
-               q->mark |= parser->mark;
-       }
+       mlx5_flow_create_update_rxqs(dev, flow);
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
@@ -2104,6 +2153,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
        }
        /* Copy configuration. */
        flow->queues = (uint16_t (*)[])(flow + 1);
+       flow->tunnel = parser.tunnel;
        flow->rss_conf = (struct rte_flow_action_rss){
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .level = 0,
@@ -2195,9 +2245,38 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
        struct priv *priv = dev->data->dev_private;
        unsigned int i;
 
-       if (flow->drop || !flow->mark)
+       if (flow->drop || !dev->data->dev_started)
                goto free;
-       for (i = 0; i != flow->rss_conf.queue_num; ++i) {
+       for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) {
+               /* Update queue tunnel type. */
+               struct mlx5_rxq_data *rxq_data = (*priv->rxqs)
+                                                [(*flow->queues)[i]];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+               uint8_t tunnel = PTYPE_IDX(flow->tunnel);
+
+               assert(rxq_ctrl->tunnel_types[tunnel] > 0);
+               rxq_ctrl->tunnel_types[tunnel] -= 1;
+               if (!rxq_ctrl->tunnel_types[tunnel]) {
+                       /* Update tunnel type. */
+                       uint8_t j;
+                       uint8_t types = 0;
+                       uint8_t last;
+
+                       for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++)
+                               if (rxq_ctrl->tunnel_types[j]) {
+                                       types += 1;
+                                       last = j;
+                               }
+                       /* Keep as is if more than one tunnel type is left. */
+                       if (types == 1)
+                               rxq_data->tunnel = ptype_ext[last];
+                       else if (types == 0)
+                               /* No tunnel type left. */
+                               rxq_data->tunnel = 0;
+               }
+       }
+       for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) {
                struct rte_flow *tmp;
                int mark = 0;
 
@@ -2416,9 +2495,9 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
 {
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
+       unsigned int i;
 
        TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
-               unsigned int i;
                struct mlx5_ind_table_ibv *ind_tbl = NULL;
 
                if (flow->drop) {
@@ -2464,6 +2543,18 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
                DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
                        (void *)flow);
        }
+       /* Cleanup Rx queue tunnel info. */
+       for (i = 0; i != priv->rxqs_n; ++i) {
+               struct mlx5_rxq_data *q = (*priv->rxqs)[i];
+               struct mlx5_rxq_ctrl *rxq_ctrl =
+                       container_of(q, struct mlx5_rxq_ctrl, rxq);
+
+               if (!q)
+                       continue;
+               memset((void *)rxq_ctrl->tunnel_types, 0,
+                      sizeof(rxq_ctrl->tunnel_types));
+               q->tunnel = 0;
+       }
 }
 
 /**
@@ -2511,7 +2602,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                                              flow->rss_conf.key_len,
                                              hash_rxq_init[i].hash_fields,
                                              flow->rss_conf.queue,
-                                             flow->rss_conf.queue_num);
+                                             flow->rss_conf.queue_num,
+                                             flow->tunnel);
                        if (flow->frxq[i].hrxq)
                                goto flow_create;
                        flow->frxq[i].hrxq =
@@ -2519,7 +2611,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                                              flow->rss_conf.key_len,
                                              hash_rxq_init[i].hash_fields,
                                              flow->rss_conf.queue,
-                                             flow->rss_conf.queue_num);
+                                             flow->rss_conf.queue_num,
+                                             flow->tunnel);
                        if (!flow->frxq[i].hrxq) {
                                DRV_LOG(DEBUG,
                                        "port %u flow %p cannot be applied",
@@ -2541,10 +2634,7 @@ flow_create:
                        DRV_LOG(DEBUG, "port %u flow %p applied",
                                dev->data->port_id, (void *)flow);
                }
-               if (!flow->mark)
-                       continue;
-               for (i = 0; i != flow->rss_conf.queue_num; ++i)
-                       (*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1;
+               mlx5_flow_create_update_rxqs(dev, flow);
        }
        return 0;
 }
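
Putting the flow side together, a rule of the kind handled above might be installed like this (a sketch only; the mark value and queue index are illustrative and not part of the patch):

#include <rte_flow.h>

static struct rte_flow *
app_create_vxlan_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* 1 = VXLAN in the application's own (hypothetical) mark scheme. */
	struct rte_flow_action_mark mark = { .id = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

If a GRE rule with a different mark id is later added to the same queue, the queue's reported tunnel ptype is cleared and the application falls back to the mark, as described in the commit message.
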
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d2b25e8..aadcfbc 100644
@@ -1386,6 +1386,8 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
+ * @param tunnel
+ *   Tunnel type.
  *
  * @return
  *   The Verbs object initialised, NULL otherwise and rte_errno is set.
@@ -1394,7 +1396,7 @@ struct mlx5_hrxq *
 mlx5_hrxq_new(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n)
+             const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
@@ -1442,6 +1444,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
        hrxq->qp = qp;
        hrxq->rss_key_len = rss_key_len;
        hrxq->hash_fields = hash_fields;
+       hrxq->tunnel = tunnel;
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
        rte_atomic32_inc(&hrxq->refcnt);
        LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
@@ -1470,6 +1473,8 @@ error:
  *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
+ * @param tunnel
+ *   Tunnel type.
  *
  * @return
  *   An hash Rx queue on success.
@@ -1478,7 +1483,7 @@ struct mlx5_hrxq *
 mlx5_hrxq_get(struct rte_eth_dev *dev,
              const uint8_t *rss_key, uint32_t rss_key_len,
              uint64_t hash_fields,
-             const uint16_t *queues, uint32_t queues_n)
+             const uint16_t *queues, uint32_t queues_n, uint32_t tunnel)
 {
        struct priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq;
@@ -1493,6 +1498,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev,
                        continue;
                if (hrxq->hash_fields != hash_fields)
                        continue;
+               if (hrxq->tunnel != tunnel)
+                       continue;
                ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
                if (!ind_tbl)
                        continue;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index cd019fb..3eb3f6d 100644
@@ -34,7 +34,7 @@
 #include "mlx5_prm.h"
 
 static __rte_always_inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
 
 static __rte_always_inline int
 mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
@@ -128,12 +128,14 @@ mlx5_set_ptype_table(void)
        (*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_L4_UDP;
        /* Tunneled - L3 */
+       (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
        (*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
        (*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
+       (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
        (*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                     RTE_PTYPE_INNER_L4_NONFRAG;
@@ -1667,6 +1669,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 /**
  * Translate RX completion flags to packet type.
  *
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
  * @param[in] cqe
  *   Pointer to CQE.
  *
@@ -1676,7 +1680,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Packet type for struct rte_mbuf.
  */
 static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
 {
        uint8_t idx;
        uint8_t pinfo = cqe->pkt_info;
@@ -1691,7 +1695,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
         * bit[7] = outer_l3_type
         */
        idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
-       return mlx5_ptype_table[idx];
+       return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
 }
 
 /**
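
The return expression above relies on bit 6 of the table index being the "tunneled" flag (see the bit layout in the comment): multiplying rxq->tunnel by !!(idx & (1 << 6)) ORs the per-queue tunnel type into the packet type only for tunneled completions, with no branch. The NEON and SSE paths further down replicate the same computation per packet. A minimal sketch of the trick on its own:

#include <stdint.h>

/* Branch-free merge of the per-queue tunnel packet type: the multiplier
 * is 1 when the "tunneled" bit (bit 6) of the index is set, 0 otherwise,
 * so non-tunneled packets keep their table entry untouched.
 */
static inline uint32_t
merge_tunnel_ptype(uint32_t table_ptype, uint8_t idx, uint32_t rxq_tunnel)
{
	return table_ptype | rxq_tunnel * !!(idx & (1 << 6));
}
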
@@ -1923,7 +1927,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        pkt = seg;
                        assert(len >= (rxq->crc_present << 2));
                        /* Update packet information. */
-                       pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+                       pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
                        pkt->ol_flags = 0;
                        if (rss_hash_res && rxq->rss_hash) {
                                pkt->hash.rss = rss_hash_res;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index c3a1ae2..5e857e3 100644
@@ -104,6 +104,7 @@ struct mlx5_rxq_data {
        void *cq_uar; /* CQ user access region. */
        uint32_t cqn; /* CQ number. */
        uint8_t cq_arm_sn; /* CQ arm seq number. */
+       uint32_t tunnel; /* Tunnel information. */
 } __rte_cache_aligned;
 
 /* Verbs Rx queue elements. */
@@ -125,6 +126,7 @@ struct mlx5_rxq_ctrl {
        struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
        struct mlx5_rxq_data rxq; /* Data path structure. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       uint32_t tunnel_types[16]; /* Tunnel type counter. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        uint16_t idx; /* Queue index. */
 };
@@ -145,6 +147,7 @@ struct mlx5_hrxq {
        struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
        struct ibv_qp *qp; /* Verbs queue pair. */
        uint64_t hash_fields; /* Verbs Hash fields. */
+       uint32_t tunnel; /* Tunnel type. */
        uint32_t rss_key_len; /* Hash key length in bytes. */
        uint8_t rss_key[]; /* Hash key. */
 };
@@ -249,11 +252,13 @@ int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
 struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
-                               const uint16_t *queues, uint32_t queues_n);
+                               const uint16_t *queues, uint32_t queues_n,
+                               uint32_t tunnel);
 struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
                                const uint8_t *rss_key, uint32_t rss_key_len,
                                uint64_t hash_fields,
-                               const uint16_t *queues, uint32_t queues_n);
+                               const uint16_t *queues, uint32_t queues_n,
+                               uint32_t tunnel);
 int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
 int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
 uint64_t mlx5_get_rx_port_offloads(void);
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 240d396..2673d6b 100644
@@ -551,6 +551,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
        const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
        const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
        uint64x2_t rearm0, rearm1, rearm2, rearm3;
+       uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
 
        if (rxq->mark) {
                const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
@@ -583,14 +584,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
        ptype = vshrn_n_u32(ptype_info, 10);
        /* Errored packets will have RTE_PTYPE_ALL_MASK. */
        ptype = vorr_u16(ptype, op_err);
-       pkts[0]->packet_type =
-               mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
-       pkts[1]->packet_type =
-               mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
-       pkts[2]->packet_type =
-               mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
-       pkts[3]->packet_type =
-               mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+       pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+       pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+       pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+       pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
+       pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+                              !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+       pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+                              !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+       pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+                              !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+       pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+                              !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
        /* Fill flags for checksum and VLAN. */
        pinfo = vandq_u32(ptype_info, ptype_ol_mask);
        pinfo = vreinterpretq_u32_u8(
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index d10da29..3e985d6 100644
@@ -542,6 +542,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
        const __m128i mbuf_init =
                _mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
        __m128i rearm0, rearm1, rearm2, rearm3;
+       uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
 
        /* Extract pkt_info field. */
        pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
@@ -595,10 +596,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
        /* Errored packets will have RTE_PTYPE_ALL_MASK. */
        op_err = _mm_srli_epi16(op_err, 8);
        ptype = _mm_or_si128(ptype, op_err);
-       pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
-       pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
-       pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
-       pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+       pt_idx0 = _mm_extract_epi8(ptype, 0);
+       pt_idx1 = _mm_extract_epi8(ptype, 2);
+       pt_idx2 = _mm_extract_epi8(ptype, 4);
+       pt_idx3 = _mm_extract_epi8(ptype, 6);
+       pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+                              !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+       pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+                              !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+       pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+                              !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+       pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+                              !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
        /* Fill flags for checksum and VLAN. */
        pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
        pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);