mbuf: add accessors for data room and private size
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 08e03ed..80d05c0 100644
@@ -51,7 +51,6 @@
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
 
-#define IGB_RSS_OFFLOAD_ALL ( \
-               ETH_RSS_IPV4 | \
-               ETH_RSS_IPV4_TCP | \
-               ETH_RSS_IPV6 | \
-               ETH_RSS_IPV6_EX | \
-               ETH_RSS_IPV6_TCP | \
-               ETH_RSS_IPV6_TCP_EX | \
-               ETH_RSS_IPV4_UDP | \
-               ETH_RSS_IPV6_UDP | \
-               ETH_RSS_IPV6_UDP_EX)
+/* Bit mask of the offload flags that require building a TX context */
+#define IGB_TX_OFFLOAD_MASK (                   \
+               PKT_TX_VLAN_PKT |                \
+               PKT_TX_IP_CKSUM |                \
+               PKT_TX_L4_MASK)
 
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
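
The old IGB_RSS_OFFLOAD_ALL list moves out of this file; in its place the driver gains a TX-side mask naming the offload flags that need a context descriptor. A minimal caller-side sketch (DPDK 2.0-era mbuf API; the pool mp and the usual headers are assumed):

    struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
    m->ol_flags |= PKT_TX_IP_CKSUM;        /* one of the IGB_TX_OFFLOAD_MASK bits */
    m->l2_len = sizeof(struct ether_hdr);  /* 14-byte Ethernet header */
    m->l3_len = sizeof(struct ipv4_hdr);   /* 20-byte IPv4 header, no options */
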
@@ -262,7 +256,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
 
        if (ol_flags & PKT_TX_IP_CKSUM) {
                type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
-               cmp_mask |= TX_MAC_LEN_CMP_MASK;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        }
 
        /* Specify which HW CTX to upload. */
@@ -361,6 +355,13 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct rte_mbuf     *tx_pkt;
        struct rte_mbuf     *m_seg;
        union igb_vlan_macip vlan_macip_lens;
+       union {
+               uint16_t u16;
+               struct {
+                       uint16_t l3_len:9;
+                       uint16_t l2_len:7;
+               };
+       } l2_l3_len;
        uint64_t buf_dma_addr;
        uint32_t olinfo_status;
        uint32_t cmd_type_len;
@@ -398,9 +399,11 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
 
                ol_flags = tx_pkt->ol_flags;
+               l2_l3_len.l2_len = tx_pkt->l2_len;
+               l2_l3_len.l3_len = tx_pkt->l3_len;
                vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
-               vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
-               tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+               vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
+               tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
 
                /* If a Context Descriptor needs to be built. */
                if (tx_ol_req) {
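
The on-stack union repacks the mbuf's separate l2_len/l3_len fields into the single 16-bit l2_l3_len value the context descriptor expects. A sketch of the packing (bitfield layout is compiler-defined; shown for the usual little-endian ABI):

    l2_l3_len.l2_len = 14;  /* Ethernet header */
    l2_l3_len.l3_len = 20;  /* IPv4 header without options */
    /* low 9 bits carry l3_len, top 7 bits carry l2_len:
     * l2_l3_len.u16 == (14 << 9) | 20 == 0x1c14 */
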
@@ -415,7 +418,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
                PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-                          " tx_first=%u tx_last=%u\n",
+                          " tx_first=%u tx_last=%u",
                           (unsigned) txq->port_id,
                           (unsigned) txq->queue_id,
                           (unsigned) pkt_len,
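
This hunk, and the similar ones below, drop the trailing "\n" from log format strings: the PMD logging macros now append the newline themselves, roughly as follows (an assumption about the era's e1000_logs.h, shown for illustration):

    #ifdef RTE_LIBRTE_E1000_DEBUG_RX
    #define PMD_RX_LOG(level, fmt, args...) \
            RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
    #else
    #define PMD_RX_LOG(level, fmt, args...) do { } while (0)
    #endif
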
@@ -714,8 +717,8 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * to happen by sending specific "back-pressure" flow control
                 * frames to its peer(s).
                 */
-               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-                          "staterr=0x%x pkt_len=%u\n",
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x pkt_len=%u",
                           (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                           (unsigned) rx_id, (unsigned) staterr,
                           (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -723,7 +726,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                nmb = rte_rxmbuf_alloc(rxq->mb_pool);
                if (nmb == NULL) {
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
                        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                        break;
@@ -808,7 +811,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u\n",
+                          "nb_hold=%u nb_rx=%u",
                           (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                           (unsigned) rx_id, (unsigned) nb_hold,
                           (unsigned) nb_rx);
@@ -895,8 +898,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * to happen by sending specific "back-pressure" flow control
                 * frames to its peer(s).
                 */
-               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-                          "staterr=0x%x data_len=%u\n",
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x data_len=%u",
                           (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                           (unsigned) rx_id, (unsigned) staterr,
                           (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
@@ -904,7 +907,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                nmb = rte_rxmbuf_alloc(rxq->mb_pool);
                if (nmb == NULL) {
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
                        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
                        break;
@@ -1061,7 +1064,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
        if (nb_hold > rxq->rx_free_thresh) {
                PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-                          "nb_hold=%u nb_rx=%u\n",
+                          "nb_hold=%u nb_rx=%u",
                           (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
                           (unsigned) rx_id, (unsigned) nb_hold,
                           (unsigned) nb_rx);
@@ -1161,8 +1164,7 @@ igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
 static void
 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 {
-       static const union e1000_adv_tx_desc zeroed_desc = { .read = {
-                       .buffer_addr = 0}};
+       static const union e1000_adv_tx_desc zeroed_desc = {{0}};
        struct igb_tx_entry *txe = txq->sw_ring;
        uint16_t i, prev;
        struct e1000_hw *hw;
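
The designated initializer of the descriptor union gives way to plain brace initialization, which zeroes the first union member and sidesteps compilers (notably ICC of that period) that rejected designated initializers on unions. Both forms produce an all-zero descriptor:

    union e1000_adv_tx_desc a = { .read = { .buffer_addr = 0 } }; /* old */
    union e1000_adv_tx_desc b = {{0}};                            /* new */
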
@@ -1240,7 +1242,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* First allocate the tx queue data structure */
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
-                                                       CACHE_LINE_SIZE);
+                                                       RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
                return (-ENOMEM);
 
@@ -1278,12 +1280,12 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
        /* Allocate software ring */
        txq->sw_ring = rte_zmalloc("txq->sw_ring",
                                   sizeof(struct igb_tx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE);
+                                  RTE_CACHE_LINE_SIZE);
        if (txq->sw_ring == NULL) {
                igb_tx_queue_release(txq);
                return (-ENOMEM);
        }
-       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
        igb_reset_tx_queue(txq, dev);
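
CACHE_LINE_SIZE becomes RTE_CACHE_LINE_SIZE here and in the RX path below, following the EAL's namespacing of its public macros. The third argument of rte_zmalloc() is the alignment, so the rings stay cache-line aligned; a generic sketch (ring_bytes is a placeholder size):

    /* zeroed, cache-line-aligned allocation; returns NULL on failure */
    void *ring = rte_zmalloc("example_ring", ring_bytes, RTE_CACHE_LINE_SIZE);
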
@@ -1327,8 +1329,7 @@ eth_igb_rx_queue_release(void *rxq)
 static void
 igb_reset_rx_queue(struct igb_rx_queue *rxq)
 {
-       static const union e1000_adv_rx_desc zeroed_desc = { .read = {
-                       .pkt_addr = 0}};
+       static const union e1000_adv_rx_desc zeroed_desc = {{0}};
        unsigned i;
 
        /* Zero out HW ring memory */
@@ -1374,7 +1375,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 
        /* First allocate the RX queue data structure. */
        rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
-                         CACHE_LINE_SIZE);
+                         RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
                return (-ENOMEM);
        rxq->mb_pool = mp;
@@ -1416,12 +1417,12 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        /* Allocate software ring. */
        rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
                                   sizeof(struct igb_rx_entry) * nb_desc,
-                                  CACHE_LINE_SIZE);
+                                  RTE_CACHE_LINE_SIZE);
        if (rxq->sw_ring == NULL) {
                igb_rx_queue_release(rxq);
                return (-ENOMEM);
        }
-       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
        dev->data->rx_queues[queue_idx] = rxq;
@@ -1439,7 +1440,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        uint32_t desc = 0;
 
        if (rx_queue_id >= dev->data->nb_rx_queues) {
-               PMD_RX_LOG(ERR, "Invalid RX queue id=%d\n", rx_queue_id);
+               PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
                return 0;
        }
 
@@ -1567,19 +1568,19 @@ igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
        mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
        if (rss_hf & ETH_RSS_IPV4)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
-       if (rss_hf & ETH_RSS_IPV4_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
        if (rss_hf & ETH_RSS_IPV6)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
        if (rss_hf & ETH_RSS_IPV6_EX)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
-       if (rss_hf & ETH_RSS_IPV6_TCP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
        if (rss_hf & ETH_RSS_IPV6_TCP_EX)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
-       if (rss_hf & ETH_RSS_IPV4_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
-       if (rss_hf & ETH_RSS_IPV6_UDP)
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
        if (rss_hf & ETH_RSS_IPV6_UDP_EX)
                mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
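
The ETH_RSS_IPV4_TCP-style names are replaced by their ETH_RSS_NONFRAG_* equivalents from the reworked RSS flag set. An application selects them the same way as before, e.g. (hypothetical port_id):

    struct rte_eth_rss_conf rss_conf = {
            .rss_key = NULL,   /* keep the current hash key */
            .rss_hf  = ETH_RSS_IPV4 |
                       ETH_RSS_NONFRAG_IPV4_TCP |
                       ETH_RSS_NONFRAG_IPV4_UDP,
    };
    int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
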
@@ -1649,19 +1650,19 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
                rss_hf |= ETH_RSS_IPV4;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
-               rss_hf |= ETH_RSS_IPV4_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
                rss_hf |= ETH_RSS_IPV6;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
                rss_hf |= ETH_RSS_IPV6_EX;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
-               rss_hf |= ETH_RSS_IPV6_TCP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
                rss_hf |= ETH_RSS_IPV6_TCP_EX;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
-               rss_hf |= ETH_RSS_IPV4_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
-               rss_hf |= ETH_RSS_IPV6_UDP;
+               rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
                rss_hf |= ETH_RSS_IPV6_UDP_EX;
        rss_conf->rss_hf = rss_hf;
@@ -1738,7 +1739,7 @@ igb_is_vmdq_supported(const struct rte_eth_dev *dev)
        case e1000_i210:
        case e1000_i211:
        default:
-               PMD_INIT_LOG(ERR, "Cannot support VMDq feature\n");
+               PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
                return 0;
        }
 }
@@ -1751,7 +1752,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        uint32_t mrqc, vt_ctl, vmolr, rctl;
        int i;
 
-       PMD_INIT_LOG(DEBUG, ">>");
+       PMD_INIT_FUNC_TRACE();
+
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
 
@@ -1778,6 +1780,26 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
        E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
 
+       for (i = 0; i < E1000_VMOLR_SIZE; i++) {
+               vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
+               vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
+                       E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
+                       E1000_VMOLR_MPME);
+
+               if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
+                       vmolr |= E1000_VMOLR_AUPE;
+               if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
+                       vmolr |= E1000_VMOLR_ROMPE;
+               if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
+                       vmolr |= E1000_VMOLR_ROPE;
+               if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
+                       vmolr |= E1000_VMOLR_BAM;
+               if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
+                       vmolr |= E1000_VMOLR_MPME;
+
+               E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
+       }
+
        /*
         * VMOLR: set STRVLAN as 1 if IGMAC in VTCTL is set as 1
         * Both 82576 and 82580 support it
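
The new loop programs each pool's VMOLR offload register from the rx_mode bits the application set in its VMDq configuration, for example (a sketch using the port-configuration fields of that era):

    struct rte_eth_conf port_conf;
    memset(&port_conf, 0, sizeof(port_conf));
    port_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
    port_conf.rx_adv_conf.vmdq_rx_conf.rx_mode =
            ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
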
@@ -1840,7 +1862,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-                                    "queue_id=%hu\n", rxq->queue_id);
+                                    "queue_id=%hu", rxq->queue_id);
                        return (-ENOMEM);
                }
                dma_addr =
@@ -1899,7 +1921,6 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 {
        struct e1000_hw     *hw;
        struct igb_rx_queue *rxq;
-       struct rte_pktmbuf_pool_private *mbp_priv;
        uint32_t rctl;
        uint32_t rxcsum;
        uint32_t srrctl;
@@ -1969,9 +1990,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure RX buffer size.
                 */
-               mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
-               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
-                                      RTE_PKTMBUF_HEADROOM);
+               buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+                       RTE_PKTMBUF_HEADROOM);
                if (buf_size >= 1024) {
                        /*
                         * Configure the BSIZEPACKET field of the SRRCTL
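
This is the change the commit title describes: rather than dereferencing the mempool's private area directly, the driver asks the new mbuf accessor for the data room size. Usage is simply:

    /* accessor added by this commit; returns the pool's configured data room */
    uint16_t room = rte_pktmbuf_data_room_size(rxq->mb_pool);
    uint16_t buf_size = room - RTE_PKTMBUF_HEADROOM;
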
@@ -1989,6 +2009,9 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                        /* It adds dual VLAN length for supporting dual VLAN */
                        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE) > buf_size){
+                               if (!dev->data->scattered_rx)
+                                       PMD_INIT_LOG(DEBUG,
+                                                    "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                                dev->data->scattered_rx = 1;
                        }
@@ -1998,6 +2021,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
+                       if (!dev->data->scattered_rx)
+                               PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                        dev->data->scattered_rx = 1;
                }
@@ -2019,6 +2044,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        }
 
        if (dev->data->dev_conf.rxmode.enable_scatter) {
+               if (!dev->data->scattered_rx)
+                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
        }
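
Scatter mode is now traceable: each site that flips rx_pkt_burst to the scattered handler logs the transition once. Applications opt in explicitly through the pre-2.x rxmode flag, e.g. (hypothetical port and queue counts):

    struct rte_eth_conf conf;
    memset(&conf, 0, sizeof(conf));
    conf.rxmode.enable_scatter = 1;
    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
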
@@ -2192,7 +2219,6 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 {
        struct e1000_hw     *hw;
        struct igb_rx_queue *rxq;
-       struct rte_pktmbuf_pool_private *mbp_priv;
        uint32_t srrctl;
        uint16_t buf_size;
        uint16_t rctl_bsize;
@@ -2233,9 +2259,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure RX buffer size.
                 */
-               mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
-               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
-                                      RTE_PKTMBUF_HEADROOM);
+               buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
+                       RTE_PKTMBUF_HEADROOM);
                if (buf_size >= 1024) {
                        /*
                         * Configure the BSIZEPACKET field of the SRRCTL
@@ -2253,6 +2278,9 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                        /* It adds dual VLAN length for supporting dual VLAN */
                        if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE) > buf_size){
+                               if (!dev->data->scattered_rx)
+                                       PMD_INIT_LOG(DEBUG,
+                                                    "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                                dev->data->scattered_rx = 1;
                        }
@@ -2262,6 +2290,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
+                       if (!dev->data->scattered_rx)
+                               PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                        dev->data->scattered_rx = 1;
                }
@@ -2285,7 +2315,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                         * to avoid Write-Back not triggered sometimes
                         */
                        rxdctl |= 0x10000;
-                       PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !\n");
+                       PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
                }
                else
                        rxdctl |= ((rxq->wthresh & 0x1F) << 16);
@@ -2293,6 +2323,8 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
        }
 
        if (dev->data->dev_conf.rxmode.enable_scatter) {
+               if (!dev->data->scattered_rx)
+                       PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
        }
@@ -2353,7 +2385,7 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
                         * to avoid Write-Back not triggered sometimes
                         */
                        txdctl |= 0x10000;
-                       PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !\n");
+                       PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
                }
                else
                        txdctl |= ((txq->wthresh & 0x1F) << 16);
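
The WTHRESH override itself is unchanged; only the log string loses its newline. The threshold normally arrives through rte_eth_txconf at queue-setup time, as in this sketch (hypothetical port and descriptor count), and is forced to 1 on the 82576 VF regardless:

    struct rte_eth_txconf txconf = {
            .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
    };
    rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
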