mbuf: add accessors for data room and private size
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 1c5cfd6..493cfa3 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
 
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 
+#define I40E_TX_CKSUM_OFFLOAD_MASK (            \
+               PKT_TX_IP_CKSUM |                \
+               PKT_TX_L4_MASK |                 \
+               PKT_TX_OUTER_IP_CKSUM)
+
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
 
@@ -105,6 +110,10 @@ i40e_rxd_status_to_pkt_flags(uint64_t qword)
                                        I40E_RX_DESC_FLTSTAT_RSS_HASH) ==
                        I40E_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
 
+       /* Check if FDIR Match */
+       flags |= (qword & (1 << I40E_RX_DESC_STATUS_FLM_SHIFT) ?
+                                                       PKT_RX_FDIR : 0);
+
        return flags;
 }
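The FLM status bit only says that some flow director filter matched this packet; which filter it was (or which flex bytes were extracted) is recovered later from the extended status words, see i40e_rxd_build_fdir() below. On the receive side of an application the result is consumed roughly as follows; a sketch only, assuming the port, queue and flow director filter are already configured:

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Sketch: drain one RX queue and report flow director matches. */
    static void
    dump_fdir_matches(uint8_t port_id, uint16_t queue_id)
    {
    	struct rte_mbuf *pkts[32];
    	uint16_t i, nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

    	for (i = 0; i < nb; i++) {
    		uint64_t fl = pkts[i]->ol_flags;

    		if (fl & PKT_RX_FDIR) {
    			if (fl & PKT_RX_FDIR_ID)
    				printf("fdir filter id: %u\n",
    				       pkts[i]->hash.fdir.hi);
    			else if (fl & PKT_RX_FDIR_FLX)
    				printf("fdir flex bytes: %#x %#x\n",
    				       pkts[i]->hash.fdir.hi,
    				       pkts[i]->hash.fdir.lo);
    		}
    		rte_pktmbuf_free(pkts[i]);
    	}
    }
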
 
@@ -410,58 +419,98 @@ i40e_rxd_ptype_to_pkt_flags(uint64_t qword)
        return ip_ptype_map[ptype];
 }
 
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK   0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID  0x01
+#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX   0x02
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK   0x03
+#define I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX   0x01
+
+static inline uint64_t
+i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+       uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
+       uint16_t flexbh, flexbl;
+
+       flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+               I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+               I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+       flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+               I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
+               I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
+
+       if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+               mb->hash.fdir.hi =
+                       rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+               flags |= PKT_RX_FDIR_ID;
+       } else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
+               mb->hash.fdir.hi =
+                       rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
+               flags |= PKT_RX_FDIR_FLX;
+       }
+       if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
+               mb->hash.fdir.lo =
+                       rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
+               flags |= PKT_RX_FDIR_FLX;
+       }
+#else
+       mb->hash.fdir.hi =
+               rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+       flags |= PKT_RX_FDIR_ID;
+#endif
+       return flags;
+}
+
 static inline void
 i40e_txd_enable_checksum(uint64_t ol_flags,
                        uint32_t *td_cmd,
                        uint32_t *td_offset,
-                       uint8_t l2_len,
-                       uint16_t l3_len,
-                       uint8_t inner_l2_len,
-                       uint16_t inner_l3_len,
+                       union i40e_tx_offload tx_offload,
                        uint32_t *cd_tunneling)
 {
-       if (!l2_len) {
-               PMD_DRV_LOG(DEBUG, "L2 length set to 0");
-               return;
-       }
-       *td_offset |= (l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+       /* UDP tunneling packet TX checksum offload */
+       if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
 
-       if (!l3_len) {
-               PMD_DRV_LOG(DEBUG, "L3 length set to 0");
-               return;
-       }
-
-       /* VXLAN packet TX checksum offload */
-       if (unlikely(ol_flags & PKT_TX_VXLAN_CKSUM)) {
-               uint8_t l4tun_len;
-
-               l4tun_len = ETHER_VXLAN_HLEN + inner_l2_len;
+               *td_offset |= (tx_offload.outer_l2_len >> 1)
+                               << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
-               if (ol_flags & PKT_TX_IPV4_CSUM)
+               if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-               else if (ol_flags & PKT_TX_IPV6)
+               else if (ol_flags & PKT_TX_OUTER_IPV4)
+                       *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+               else if (ol_flags & PKT_TX_OUTER_IPV6)
                        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
 
                /* Now set the ctx descriptor fields */
-               *cd_tunneling |= (l3_len >> 2) <<
+               *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
                                I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-                               I40E_TXD_CTX_UDP_TUNNELING |
-                               (l4tun_len >> 1) <<
+                               (tx_offload.l2_len >> 1) <<
                                I40E_TXD_CTX_QW0_NATLEN_SHIFT;
 
-               l3_len = inner_l3_len;
-       }
+       } else
+               *td_offset |= (tx_offload.l2_len >> 1)
+                       << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
 
        /* Enable L3 checksum offloads */
-       if (ol_flags & PKT_TX_IPV4_CSUM) {
+       if (ol_flags & PKT_TX_IP_CKSUM) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV4) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
        } else if (ol_flags & PKT_TX_IPV6) {
                *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
-               *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+               *td_offset |= (tx_offload.l3_len >> 2)
+                               << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+       }
+
+       if (ol_flags & PKT_TX_TCP_SEG) {
+               *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+               *td_offset |= (tx_offload.l4_len >> 2)
+                       << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+               return;
        }
 
        /* Enable L4 checksum offloads */
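With the per-field length arguments folded into a single union i40e_tx_offload (declared in i40e_rxtx.h), the MACLEN/IPLEN fields of the data descriptor and the EXT_IPLEN/NATLEN fields of the context descriptor are all derived from the mbuf length fields. What a sender fills in for a VXLAN-encapsulated TCP packet when it wants the outer IPv4 and inner checksums computed in hardware looks roughly like this; a sketch only, with illustrative lengths and 'm' an already-built mbuf:

    /* Outer Ether/IPv4/UDP + VXLAN + inner Ether/IPv4/TCP. Note that l2_len
     * spans outer UDP + VXLAN + inner Ethernet, matching the NATLEN
     * programming above. */
    m->outer_l2_len = 14;                   /* outer Ethernet               */
    m->outer_l3_len = 20;                   /* outer IPv4                   */
    m->l2_len = 8 + 8 + 14;                 /* outer UDP + VXLAN + inner L2 */
    m->l3_len = 20;                         /* inner IPv4                   */
    m->ol_flags |= PKT_TX_OUTER_IP_CKSUM |  /* outer IPv4 csum in HW        */
    	PKT_TX_IP_CKSUM |               /* inner IPv4 csum in HW        */
    	PKT_TX_TCP_CKSUM;               /* inner TCP csum in HW         */
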
@@ -571,7 +620,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
                             "rxq->nb_rx_desc=%d",
                             rxq->rx_free_thresh, rxq->nb_rx_desc);
                ret = -EINVAL;
-       } else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) {
+       } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
                             "rxq->nb_rx_desc=%d, "
                             "rxq->rx_free_thresh=%d",
@@ -661,14 +710,17 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
                        pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
                        pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
                        pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
-                       mb->ol_flags = pkt_flags;
 
                        mb->packet_type = (uint16_t)((qword1 &
                                        I40E_RXD_QW1_PTYPE_MASK) >>
                                        I40E_RXD_QW1_PTYPE_SHIFT);
                        if (pkt_flags & PKT_RX_RSS_HASH)
                                mb->hash.rss = rte_le_to_cpu_32(\
-                                       rxdp->wb.qword0.hi_dword.rss);
+                                       rxdp[j].wb.qword0.hi_dword.rss);
+                       if (pkt_flags & PKT_RX_FDIR)
+                               pkt_flags |= i40e_rxd_build_fdir(&rxdp[j], mb);
+
+                       mb->ol_flags = pkt_flags;
                }
 
                for (j = 0; j < I40E_LOOK_AHEAD; j++)
@@ -903,10 +955,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                pkt_flags |= i40e_rxd_ptype_to_pkt_flags(qword1);
                rxm->packet_type = (uint16_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
                                I40E_RXD_QW1_PTYPE_SHIFT);
-               rxm->ol_flags = pkt_flags;
                if (pkt_flags & PKT_RX_RSS_HASH)
                        rxm->hash.rss =
                                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+               if (pkt_flags & PKT_RX_FDIR)
+                       pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+               rxm->ol_flags = pkt_flags;
 
                rx_pkts[nb_rx++] = rxm;
        }
@@ -1060,10 +1115,13 @@ i40e_recv_scattered_pkts(void *rx_queue,
                first_seg->packet_type = (uint16_t)((qword1 &
                                        I40E_RXD_QW1_PTYPE_MASK) >>
                                        I40E_RXD_QW1_PTYPE_SHIFT);
-               first_seg->ol_flags = pkt_flags;
                if (pkt_flags & PKT_RX_RSS_HASH)
                        rxm->hash.rss =
                                rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
+               if (pkt_flags & PKT_RX_FDIR)
+                       pkt_flags |= i40e_rxd_build_fdir(&rxd, rxm);
+
+               first_seg->ol_flags = pkt_flags;
 
                /* Prefetch data of first segment, if configured to do so. */
                rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
@@ -1103,8 +1161,7 @@ i40e_calc_context_desc(uint64_t flags)
 {
        uint64_t mask = 0ULL;
 
-       if (flags | PKT_TX_VXLAN_CKSUM)
-               mask |= PKT_TX_VXLAN_CKSUM;
+       mask |= (PKT_TX_OUTER_IP_CKSUM | PKT_TX_TCP_SEG);
 
 #ifdef RTE_LIBRTE_IEEE1588
        mask |= PKT_TX_IEEE1588_TMST;
@@ -1115,6 +1172,39 @@ i40e_calc_context_desc(uint64_t flags)
        return 0;
 }
 
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+       uint64_t ctx_desc = 0;
+       uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+       if (!tx_offload.l4_len) {
+               PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+               return ctx_desc;
+       }
+
+       /**
+        * in case of non tunneling packet, the outer_l2_len and
+        * outer_l3_len must be 0.
+        */
+       hdr_len = tx_offload.outer_l2_len +
+               tx_offload.outer_l3_len +
+               tx_offload.l2_len +
+               tx_offload.l3_len +
+               tx_offload.l4_len;
+
+       cd_cmd = I40E_TX_CTX_DESC_TSO;
+       cd_tso_len = mbuf->pkt_len - hdr_len;
+       ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+               ((uint64_t)cd_tso_len <<
+                I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+               ((uint64_t)mbuf->tso_segsz <<
+                I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+       return ctx_desc;
+}
+
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
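i40e_set_tso_ctx() above packs three things into the context descriptor: the TSO command, the L4 payload length (packet length minus every header, outer headers included for tunneled frames) and the MSS. To make the arithmetic concrete, a purely illustrative computation for a plain, non-tunneled TCP/IPv4 frame:

    /* Illustration only, not driver code: the i40e_set_tso_ctx arithmetic
     * for a non-tunneled TCP/IPv4 frame (outer_l2_len = outer_l3_len = 0). */
    static uint32_t
    example_tso_payload_len(void)
    {
    	uint32_t pkt_len = 9014;            /* whole frame in the mbuf chain */
    	uint32_t hdr_len = 14 + 20 + 20;    /* Ether + IPv4 + TCP = 54       */

    	/* cd_tso_len = 8960; with tso_segsz = 1460 the NIC emits 7 segments. */
    	return pkt_len - hdr_len;
    }
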
@@ -1133,15 +1223,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint32_t tx_flags;
        uint32_t td_tag;
        uint64_t ol_flags;
-       uint8_t l2_len;
-       uint16_t l3_len;
-       uint8_t inner_l2_len;
-       uint16_t inner_l3_len;
        uint16_t nb_used;
        uint16_t nb_ctx;
        uint16_t tx_last;
        uint16_t slen;
        uint64_t buf_dma_addr;
+       union i40e_tx_offload tx_offload = {0};
 
        txq = tx_queue;
        sw_ring = txq->sw_ring;
@@ -1163,10 +1250,12 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
 
                ol_flags = tx_pkt->ol_flags;
-               l2_len = tx_pkt->l2_len;
-               inner_l2_len = tx_pkt->inner_l2_len;
-               l3_len = tx_pkt->l3_len;
-               inner_l3_len = tx_pkt->inner_l3_len;
+               tx_offload.l2_len = tx_pkt->l2_len;
+               tx_offload.l3_len = tx_pkt->l3_len;
+               tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+               tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+               tx_offload.l4_len = tx_pkt->l4_len;
+               tx_offload.tso_segsz = tx_pkt->tso_segsz;
 
                /* Calculate the number of context descriptors needed. */
                nb_ctx = i40e_calc_context_desc(ol_flags);
@@ -1215,10 +1304,10 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Enable checksum offloading */
                cd_tunneling_params = 0;
-               i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
-                                               l2_len, l3_len, inner_l2_len,
-                                               inner_l3_len,
-                                               &cd_tunneling_params);
+               if (unlikely(ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)) {
+                       i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+                               tx_offload, &cd_tunneling_params);
+               }
 
                if (unlikely(nb_ctx)) {
                        /* Setup TX context descriptor if required */
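The new gate above skips the whole checksum-descriptor setup when none of the relevant bits is set. One subtlety when reading I40E_TX_CKSUM_OFFLOAD_MASK: PKT_TX_L4_MASK is a two-bit field rather than a single flag, so testing against the mask answers "any checksum work requested?", while identifying which L4 offload still needs an equality compare, as the L4 switch in i40e_txd_enable_checksum() does. Illustrative fragment only:

    uint64_t flags = PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;

    int any_cksum = !!(flags & I40E_TX_CKSUM_OFFLOAD_MASK);          /* 1 */
    int is_udp    = (flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM;    /* 1 */
    int is_tcp    = (flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM;    /* 0 */
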
@@ -1235,17 +1324,37 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                rte_pktmbuf_free_seg(txe->mbuf);
                                txe->mbuf = NULL;
                        }
-#ifdef RTE_LIBRTE_IEEE1588
-                       if (ol_flags & PKT_TX_IEEE1588_TMST)
+
+                       /* TSO enabled means no timestamp */
+                       if (ol_flags & PKT_TX_TCP_SEG)
                                cd_type_cmd_tso_mss |=
-                                       ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
-                                               I40E_TXD_CTX_QW1_CMD_SHIFT);
+                                       i40e_set_tso_ctx(tx_pkt, tx_offload);
+                       else {
+#ifdef RTE_LIBRTE_IEEE1588
+                               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                                       cd_type_cmd_tso_mss |=
+                                               ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+                                                I40E_TXD_CTX_QW1_CMD_SHIFT);
 #endif
+                       }
+
                        ctx_txd->tunneling_params =
                                rte_cpu_to_le_32(cd_tunneling_params);
                        ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
                        ctx_txd->type_cmd_tso_mss =
                                rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+                       PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
+                               "tunneling_params: %#x;\n"
+                               "l2tag2: %#hx;\n"
+                               "rsvd: %#hx;\n"
+                               "type_cmd_tso_mss: %#"PRIx64";\n",
+                               tx_pkt, tx_id,
+                               ctx_txd->tunneling_params,
+                               ctx_txd->l2tag2,
+                               ctx_txd->rsvd,
+                               ctx_txd->type_cmd_tso_mss);
+
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
@@ -1263,6 +1372,16 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        /* Setup TX Descriptor */
                        slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
+                       PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+                               "buf_dma_addr: %#"PRIx64";\n"
+                               "td_cmd: %#x;\n"
+                               "td_offset: %#x;\n"
+                               "td_len: %u;\n"
+                               "td_tag: %#x;\n",
+                               tx_pkt, tx_id, buf_dma_addr,
+                               td_cmd, td_offset, slen, td_tag);
+
                        txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
                        txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
                                                td_offset, slen, td_tag);
@@ -1697,7 +1816,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        /* Allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("i40e rx queue",
                                 sizeof(struct i40e_rx_queue),
-                                CACHE_LINE_SIZE,
+                                RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@@ -1756,7 +1875,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->sw_ring =
                rte_zmalloc_socket("i40e rx sw ring",
                                   sizeof(struct i40e_rx_entry) * len,
-                                  CACHE_LINE_SIZE,
+                                  RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!rxq->sw_ring) {
                i40e_dev_rx_queue_release(rxq);
@@ -1981,7 +2100,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("i40e tx queue",
                                  sizeof(struct i40e_tx_queue),
-                                 CACHE_LINE_SIZE,
+                                 RTE_CACHE_LINE_SIZE,
                                  socket_id);
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@@ -2032,7 +2151,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->sw_ring =
                rte_zmalloc_socket("i40e tx sw ring",
                                   sizeof(struct i40e_tx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE,
+                                  RTE_CACHE_LINE_SIZE,
                                   socket_id);
        if (!txq->sw_ring) {
                i40e_dev_tx_queue_release(txq);
@@ -2325,11 +2444,10 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
        struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
        struct rte_eth_dev_data *data = pf->dev_data;
-       struct rte_pktmbuf_pool_private *mbp_priv =
-                       rte_mempool_get_priv(rxq->mp);
-       uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
-                                               RTE_PKTMBUF_HEADROOM);
-       uint16_t len;
+       uint16_t buf_size, len;
+
+       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+               RTE_PKTMBUF_HEADROOM);
 
        switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
                        I40E_FLAG_HEADER_SPLIT_ENABLED)) {
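This hunk is what the commit title refers to: rather than reaching into the mempool private area directly, the driver now asks the mbuf API for the data room size. The value the accessor returns is fixed when the pool is created; a sketch using the pool-creation helper introduced alongside these accessors (sizes are illustrative; on trees that predate the helper, rte_mempool_create() with rte_pktmbuf_pool_init() sets up the same private data):

    /* 8192 mbufs, 256-entry per-lcore cache, no per-mbuf private area,
     * 2 KB of usable data room after the headroom. */
    struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
    		8192, 256, 0,
    		2048 + RTE_PKTMBUF_HEADROOM,
    		rte_socket_id());

    /* The same quantity the driver derives above: */
    uint16_t buf_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
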
@@ -2387,7 +2505,6 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
        uint16_t pf_q = rxq->reg_idx;
        uint16_t buf_size;
        struct i40e_hmc_obj_rxq rx_ctx;
-       struct rte_pktmbuf_pool_private *mbp_priv;
 
        err = i40e_rx_queue_config(rxq);
        if (err < 0) {
@@ -2434,9 +2551,8 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq)
 
        rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 
-       mbp_priv = rte_mempool_get_priv(rxq->mp);
-       buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
-                                       RTE_PKTMBUF_HEADROOM);
+       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+               RTE_PKTMBUF_HEADROOM);
 
        /* Check if scattered RX needs to be used. */
        if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
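The check just above compares the configured maximum frame size against the per-mbuf buffer derived from the same accessor. A worked example of that comparison, with illustrative numbers:

    /* Illustration only: jumbo frames on a pool with 2 KB of data room.
     * max_pkt_len = 9000, VLAN allowance = 2 * 4 bytes, buf_size = 2048:
     * 9000 + 8 > 2048, so the queue falls back to scattered RX and each
     * packet is delivered as a chain of mbuf segments. */
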
@@ -2487,7 +2603,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("i40e fdir tx queue",
                                  sizeof(struct i40e_tx_queue),
-                                 CACHE_LINE_SIZE,
+                                 RTE_CACHE_LINE_SIZE,
                                  SOCKET_ID_ANY);
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
@@ -2547,7 +2663,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
        /* Allocate the RX queue data structure. */
        rxq = rte_zmalloc_socket("i40e fdir rx queue",
                                  sizeof(struct i40e_rx_queue),
-                                 CACHE_LINE_SIZE,
+                                 RTE_CACHE_LINE_SIZE,
                                  SOCKET_ID_ANY);
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "