ethdev: remove legacy Rx descriptor done API
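
The driver-side change for the subject above is the removal of
virtio_dev_rx_queue_done(): ethdev dropped the legacy
rte_eth_rx_descriptor_done() API and its rx_descriptor_done callback,
and applications are expected to query per-descriptor state with
rte_eth_rx_descriptor_status() instead. A minimal sketch of the
replacement call (the helper name and parameter values are
illustrative):

#include <rte_ethdev.h>

/* Illustrative helper: report whether the Rx descriptor at 'offset'
 * has been filled by the device. rte_eth_rx_descriptor_status()
 * returns RTE_ETH_RX_DESC_AVAIL, RTE_ETH_RX_DESC_DONE,
 * RTE_ETH_RX_DESC_UNAVAIL or a negative errno.
 */
static int
rx_desc_is_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_RX_DESC_DONE;
}

The remaining hunks below are cleanups accumulated between the two
blob revisions (vtpci_* renames, the packed-ring refill refactor, and
the __rte_weak vectorized stubs); notes on the latter two follow the
diff.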
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 050541a..e378e66 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -15,7 +15,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_string_fns.h>
 #include <rte_errno.h>
@@ -27,7 +27,7 @@
 
 #include "virtio_logs.h"
 #include "virtio_ethdev.h"
-#include "virtio_pci.h"
+#include "virtio.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
 #include "virtio_rxtx_simple.h"
 #define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
 #endif
 
-int
-virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
-{
-       struct virtnet_rx *rxvq = rxq;
-       struct virtqueue *vq = rxvq->vq;
-
-       return VIRTQUEUE_NUSED(vq) >= offset;
-}
-
 void
 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
 {
@@ -147,7 +138,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 
        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx;
-               /* desc_is_used has a load-acquire or rte_cio_rmb inside
+               /* desc_is_used has a load-acquire or rte_io_rmb inside
                 * and wait for used desc in virtqueue.
                 */
                if (!desc_is_used(&desc[used_idx], vq))
@@ -271,13 +262,10 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
                dxp->cookie = (void *)cookies[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr =
-                               VIRTIO_MBUF_ADDR(cookies[i], vq) +
-                               RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-               start_dp[idx].len =
-                               cookies[i]->buf_len -
-                               RTE_PKTMBUF_HEADROOM +
-                               hw->vtnet_hdr_size;
+               start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
+                       RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+               start_dp[idx].len = cookies[i]->buf_len -
+                       RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
                start_dp[idx].flags =  VRING_DESC_F_WRITE;
 
                vq_update_avail_ring(vq, idx);
@@ -313,11 +301,9 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr =
-                       VIRTIO_MBUF_ADDR(cookie[i], vq) +
+               start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
                        RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-               start_dp[idx].len =
-                       cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
+               start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
                        hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_WRITE;
                vq->vq_desc_head_idx = start_dp[idx].next;
@@ -333,13 +319,32 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
        return 0;
 }
 
+static inline void
+virtqueue_refill_single_packed(struct virtqueue *vq,
+                              struct vring_packed_desc *dp,
+                              struct rte_mbuf *cookie)
+{
+       uint16_t flags = vq->vq_packed.cached_flags;
+       struct virtio_hw *hw = vq->hw;
+
+       dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+       dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+
+       virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
+
+       if (++vq->vq_avail_idx >= vq->vq_nentries) {
+               vq->vq_avail_idx -= vq->vq_nentries;
+               vq->vq_packed.cached_flags ^=
+                       VRING_PACKED_DESC_F_AVAIL_USED;
+               flags = vq->vq_packed.cached_flags;
+       }
+}
+
 static inline int
-virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
                                     struct rte_mbuf **cookie, uint16_t num)
 {
        struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
-       uint16_t flags = vq->vq_packed.cached_flags;
-       struct virtio_hw *hw = vq->hw;
        struct vq_desc_extra *dxp;
        uint16_t idx;
        int i;
@@ -355,24 +360,34 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
-                               RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-               start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
-                                       + hw->vtnet_hdr_size;
+               virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
+       }
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+       return 0;
+}
 
-               vq->vq_desc_head_idx = dxp->next;
-               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-                       vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+                                    struct rte_mbuf **cookie, uint16_t num)
+{
+       struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+       struct vq_desc_extra *dxp;
+       uint16_t idx, did;
+       int i;
 
-               virtqueue_store_flags_packed(&start_dp[idx], flags,
-                                            hw->weak_barriers);
+       if (unlikely(vq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(vq->vq_free_cnt < num))
+               return -EMSGSIZE;
 
-               if (++vq->vq_avail_idx >= vq->vq_nentries) {
-                       vq->vq_avail_idx -= vq->vq_nentries;
-                       vq->vq_packed.cached_flags ^=
-                               VRING_PACKED_DESC_F_AVAIL_USED;
-                       flags = vq->vq_packed.cached_flags;
-               }
+       for (i = 0; i < num; i++) {
+               idx = vq->vq_avail_idx;
+               did = start_dp[idx].id;
+               dxp = &vq->vq_descx[did];
+               dxp->cookie = (void *)cookie[i];
+               dxp->ndescs = 1;
+
+               virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
        }
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
        return 0;
@@ -429,7 +444,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                        uint16_t num)
 {
        struct vq_desc_extra *dxp;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_desc *start_dp;
        struct virtio_net_hdr *hdr;
        uint16_t idx;
@@ -453,11 +468,10 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
                else
-                       virtqueue_xmit_offload(hdr, cookies[i], true);
+                       virtqueue_xmit_offload(hdr, cookies[i]);
 
-               start_dp[idx].addr  =
-                       VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
-               start_dp[idx].len   = cookies[i]->data_len + head_size;
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+               start_dp[idx].len = cookies[i]->data_len + head_size;
                start_dp[idx].flags = 0;
 
 
@@ -476,7 +490,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
                                   struct rte_mbuf *cookie,
                                   int in_order)
 {
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_packed_desc *dp;
        struct vq_desc_extra *dxp;
        uint16_t idx, id, flags;
@@ -501,11 +515,11 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        if (!vq->hw->has_tx_offload)
                virtqueue_clear_net_hdr(hdr);
        else
-               virtqueue_xmit_offload(hdr, cookie, true);
+               virtqueue_xmit_offload(hdr, cookie);
 
        dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
-       dp->len  = cookie->data_len + head_size;
-       dp->id   = id;
+       dp->len = cookie->data_len + head_size;
+       dp->id = id;
 
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
@@ -530,7 +544,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
        struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
        struct vq_desc_extra *dxp;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_desc *start_dp;
        uint16_t seg_num = cookie->nb_segs;
        uint16_t head_idx, idx;
@@ -587,11 +601,12 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                idx = start_dp[idx].next;
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
-               start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
-               start_dp[idx].len   = cookie->data_len;
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+               start_dp[idx].len = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
@@ -620,9 +635,9 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
        struct virtio_hw *hw = dev->data->dev_private;
 
-       if (hw->cvq && hw->cvq->vq) {
+       if (hw->cvq) {
                rte_spinlock_init(&hw->cvq->lock);
-               VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+               VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
        }
 }
 
@@ -634,11 +649,13 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
-       uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+       uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
-       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_rx *rxvq;
        uint16_t rx_free_thresh;
+       uint16_t buf_size;
+       const char *error;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -647,30 +664,44 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       buf_size = virtio_rx_mem_pool_buf_size(mp);
+       if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
+                                    hw->rx_ol_scatter, &error)) {
+               PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
+                            queue_idx, error);
+               return -EINVAL;
+       }
+
        rx_free_thresh = rx_conf->rx_free_thresh;
        if (rx_free_thresh == 0)
                rx_free_thresh =
                        RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
 
        if (rx_free_thresh & 0x3) {
-               RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four."
-                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+               PMD_INIT_LOG(ERR, "rx_free_thresh must be multiples of four."
+                       " (rx_free_thresh=%u port=%u queue=%u)",
                        rx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
        }
 
        if (rx_free_thresh >= vq->vq_nentries) {
-               RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
+               PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
                        "number of RX entries (%u)."
-                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+                       " (rx_free_thresh=%u port=%u queue=%u)",
                        vq->vq_nentries,
                        rx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
        }
        vq->vq_free_thresh = rx_free_thresh;
 
-       if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+       /*
+        * For the split ring vectorized path, the number of
+        * descriptors must equal the ring size.
+        */
+       if (nb_desc > vq->vq_nentries ||
+           (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
                nb_desc = vq->vq_nentries;
+       }
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
 
        rxvq = &vq->rxq;
@@ -684,21 +715,21 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 int
 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-       uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+       uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
-       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_rx *rxvq = &vq->rxq;
        struct rte_mbuf *m;
        uint16_t desc_idx;
        int error, nbufs, i;
-       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
+       bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        PMD_INIT_FUNC_TRACE();
 
        /* Allocate blank mbufs for each Rx descriptor */
        nbufs = 0;
 
-       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
+       if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
                        vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -709,19 +740,16 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                virtio_rxq_vec_setup(rxvq);
        }
 
-       memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-       for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-            desc_idx++) {
-               vq->sw_ring[vq->vq_nentries + desc_idx] =
-                       &rxvq->fake_mbuf;
-       }
+       memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
+       for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
+               vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
 
-       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
+       if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxvq);
                        nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
                }
-       } else if (!vtpci_packed_queue(vq->hw) && in_order) {
+       } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
                if ((!virtqueue_full(vq))) {
                        uint16_t free_cnt = vq->vq_free_cnt;
                        struct rte_mbuf *pkts[free_cnt];
@@ -734,10 +762,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                                if (unlikely(error)) {
                                        for (i = 0; i < free_cnt; i++)
                                                rte_pktmbuf_free(pkts[i]);
+                               } else {
+                                       nbufs += free_cnt;
                                }
                        }
 
-                       nbufs += free_cnt;
                        vq_update_avail_idx(vq);
                }
        } else {
@@ -747,8 +776,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                                break;
 
                        /* Enqueue allocated buffers */
-                       if (vtpci_packed_queue(vq->hw))
-                               error = virtqueue_enqueue_recv_refill_packed(vq,
+                       if (virtio_with_packed_queue(vq->hw))
+                               error = virtqueue_enqueue_recv_refill_packed_init(vq,
                                                &m, 1);
                        else
                                error = virtqueue_enqueue_recv_refill(vq,
@@ -760,7 +789,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        nbufs++;
                }
 
-               if (!vtpci_packed_queue(vq->hw))
+               if (!virtio_with_packed_queue(vq->hw))
                        vq_update_avail_idx(vq);
        }
 
@@ -785,9 +814,9 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        unsigned int socket_id __rte_unused,
                        const struct rte_eth_txconf *tx_conf)
 {
-       uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+       uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
-       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       struct virtqueue *vq = hw->vqs[vq_idx];
        struct virtnet_tx *txvq;
        uint16_t tx_free_thresh;
 
@@ -813,7 +842,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
        if (tx_free_thresh >= (vq->vq_nentries - 3)) {
                PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
                        "number of TX entries minus 3 (%u)."
-                       " (tx_free_thresh=%u port=%u queue=%u)\n",
+                       " (tx_free_thresh=%u port=%u queue=%u)",
                        vq->vq_nentries - 3,
                        tx_free_thresh, dev->data->port_id, queue_idx);
                return -EINVAL;
@@ -829,14 +858,14 @@ int
 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
                                uint16_t queue_idx)
 {
-       uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+       uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
-       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+       struct virtqueue *vq = hw->vqs[vq_idx];
 
        PMD_INIT_FUNC_TRACE();
 
-       if (!vtpci_packed_queue(hw)) {
-               if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
+       if (!virtio_with_packed_queue(hw)) {
+               if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
                        vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
        }
 
@@ -853,7 +882,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
         * Requeue the discarded mbuf. This should always be
         * successful since it was just dequeued.
         */
-       if (vtpci_packed_queue(vq->hw))
+       if (virtio_with_packed_queue(vq->hw))
                error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
        else
                error = virtqueue_enqueue_recv_refill(vq, &m, 1);
@@ -910,9 +939,10 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
                         */
                        uint16_t csum = 0, off;
 
-                       rte_raw_cksum_mbuf(m, hdr->csum_start,
+                       if (rte_raw_cksum_mbuf(m, hdr->csum_start,
                                rte_pktmbuf_pkt_len(m) - hdr->csum_start,
-                               &csum);
+                               &csum) < 0)
+                               return -EINVAL;
                        if (likely(csum != 0xffff))
                                csum = ~csum;
                        off = hdr->csum_offset + hdr->csum_start;
@@ -953,7 +983,7 @@ uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        struct virtnet_rx *rxvq = rx_queue;
-       struct virtqueue *vq = rxvq->vq;
+       struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        uint16_t nb_used, num, nb_rx;
@@ -968,9 +998,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        if (unlikely(hw->started == 0))
                return nb_rx;
 
-       nb_used = VIRTQUEUE_NUSED(vq);
-
-       virtio_rmb(hw->weak_barriers);
+       nb_used = virtqueue_nused(vq);
 
        num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
        if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
@@ -1062,7 +1090,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts)
 {
        struct virtnet_rx *rxvq = rx_queue;
-       struct virtqueue *vq = rxvq->vq;
+       struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        uint16_t num, nb_rx;
@@ -1165,7 +1193,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
                        uint16_t nb_pkts)
 {
        struct virtnet_rx *rxvq = rx_queue;
-       struct virtqueue *vq = rxvq->vq;
+       struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        struct rte_mbuf *prev = NULL;
@@ -1183,12 +1211,10 @@ virtio_recv_pkts_inorder(void *rx_queue,
        if (unlikely(hw->started == 0))
                return nb_rx;
 
-       nb_used = VIRTQUEUE_NUSED(vq);
+       nb_used = virtqueue_nused(vq);
        nb_used = RTE_MIN(nb_used, nb_pkts);
        nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
 
-       virtio_rmb(hw->weak_barriers);
-
        PMD_RX_LOG(DEBUG, "used:%d", nb_used);
 
        nb_enqueued = 0;
@@ -1218,7 +1244,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
                         ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
                         - hdr_size);
 
-               if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+               if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
                        seg_num = header->num_buffers;
                        if (seg_num == 0)
                                seg_num = 1;
@@ -1277,8 +1303,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
 
-               if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
-                       virtio_rmb(hw->weak_barriers);
+               if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
                        num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
                                                           rcv_cnt);
                        uint16_t extra_idx = 0;
@@ -1352,7 +1377,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                        uint16_t nb_pkts)
 {
        struct virtnet_rx *rxvq = rx_queue;
-       struct virtqueue *vq = rxvq->vq;
+       struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        struct rte_mbuf *prev = NULL;
@@ -1369,9 +1394,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
        if (unlikely(hw->started == 0))
                return nb_rx;
 
-       nb_used = VIRTQUEUE_NUSED(vq);
-
-       virtio_rmb(hw->weak_barriers);
+       nb_used = virtqueue_nused(vq);
 
        PMD_RX_LOG(DEBUG, "used:%d", nb_used);
 
@@ -1459,8 +1482,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
                                        VIRTIO_MBUF_BURST_SZ);
 
-               if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
-                       virtio_rmb(hw->weak_barriers);
+               if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
                        num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
                                                           rcv_cnt);
                        uint16_t extra_idx = 0;
@@ -1533,7 +1555,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
                        uint16_t nb_pkts)
 {
        struct virtnet_rx *rxvq = rx_queue;
-       struct virtqueue *vq = rxvq->vq;
+       struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        struct rte_mbuf *rxm;
        struct rte_mbuf *prev = NULL;
@@ -1744,11 +1766,11 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
 {
        struct virtnet_tx *txvq = tx_queue;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
-       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
+       bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -1764,11 +1786,11 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx];
-               int can_push = 0, slots, need;
+               int can_push = 0, use_indirect = 0, slots, need;
 
                /* optimize ring usage */
-               if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
-                     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+               if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+                     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                    rte_mbuf_refcnt_read(txm) == 1 &&
                    RTE_MBUF_DIRECT(txm) &&
                    txm->nb_segs == 1 &&
@@ -1776,12 +1798,15 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
                           __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
                        can_push = 1;
-
+               else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+                        txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+                       use_indirect = 1;
                /* How many main ring entries are needed for this Tx?
+                * indirect   => 1
                 * any_layout => number of segments
                 * default    => number of segments + 1
                 */
-               slots = txm->nb_segs + !can_push;
+               slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
                need = slots - vq->vq_free_cnt;
 
                /* Positive value indicates it needs free vring descriptors */
@@ -1799,7 +1824,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                if (can_push)
                        virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
                else
-                       virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0,
+                       virtqueue_enqueue_xmit_packed(txvq, txm, slots,
+                                                     use_indirect, 0,
                                                      in_order);
 
                virtio_update_packet_stats(&txvq->stats, txm);
@@ -1821,7 +1847,7 @@ uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        struct virtnet_tx *txvq = tx_queue;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0;
@@ -1833,9 +1859,9 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                return nb_pkts;
 
        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
-       nb_used = VIRTQUEUE_NUSED(vq);
 
-       virtio_rmb(hw->weak_barriers);
+       nb_used = virtqueue_nused(vq);
+
        if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
                virtio_xmit_cleanup(vq, nb_used);
 
@@ -1844,8 +1870,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                int can_push = 0, use_indirect = 0, slots, need;
 
                /* optimize ring usage */
-               if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
-                     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+               if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+                     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                    rte_mbuf_refcnt_read(txm) == 1 &&
                    RTE_MBUF_DIRECT(txm) &&
                    txm->nb_segs == 1 &&
@@ -1853,7 +1879,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
                                   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
                        can_push = 1;
-               else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+               else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
                         txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
                        use_indirect = 1;
 
@@ -1867,8 +1893,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                /* Positive value indicates it needs free vring descriptors */
                if (unlikely(need > 0)) {
-                       nb_used = VIRTQUEUE_NUSED(vq);
-                       virtio_rmb(hw->weak_barriers);
+                       nb_used = virtqueue_nused(vq);
+
                        need = RTE_MIN(need, (int)nb_used);
 
                        virtio_xmit_cleanup(vq, need);
@@ -1905,11 +1931,9 @@ static __rte_always_inline int
 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
 {
        uint16_t nb_used, nb_clean, nb_descs;
-       struct virtio_hw *hw = vq->hw;
 
        nb_descs = vq->vq_free_cnt + need;
-       nb_used = VIRTQUEUE_NUSED(vq);
-       virtio_rmb(hw->weak_barriers);
+       nb_used = virtqueue_nused(vq);
        nb_clean = RTE_MIN(need, (int)nb_used);
 
        virtio_xmit_cleanup_inorder(vq, nb_clean);
@@ -1923,7 +1947,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                        uint16_t nb_pkts)
 {
        struct virtnet_tx *txvq = tx_queue;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
@@ -1938,9 +1962,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 
        VIRTQUEUE_DUMP(vq);
        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
-       nb_used = VIRTQUEUE_NUSED(vq);
+       nb_used = virtqueue_nused(vq);
 
-       virtio_rmb(hw->weak_barriers);
        if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
                virtio_xmit_cleanup_inorder(vq, nb_used);
 
@@ -1949,8 +1972,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                int slots;
 
                /* optimize ring usage */
-               if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
-                    vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+               if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+                    virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                     rte_mbuf_refcnt_read(txm) == 1 &&
                     RTE_MBUF_DIRECT(txm) &&
                     txm->nb_segs == 1 &&
@@ -2031,8 +2054,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
        return nb_tx;
 }
 
-#ifndef CC_AVX512_SUPPORT
-uint16_t
+__rte_weak uint16_t
 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
                            struct rte_mbuf **rx_pkts __rte_unused,
                            uint16_t nb_pkts __rte_unused)
@@ -2040,11 +2062,10 @@ virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
        return 0;
 }
 
-uint16_t
+__rte_weak uint16_t
 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
                            struct rte_mbuf **tx_pkts __rte_unused,
                            uint16_t nb_pkts __rte_unused)
 {
        return 0;
 }
-#endif /* ifndef CC_AVX512_SUPPORT */
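
A note on the packed-ring refill refactor above: a packed virtqueue
signals availability through per-descriptor AVAIL/USED flag bits that
must match the driver's current wrap state, which is why
virtqueue_refill_single_packed() writes vq_packed.cached_flags into
each descriptor and XORs VRING_PACKED_DESC_F_AVAIL_USED whenever
vq_avail_idx wraps. A standalone sketch of that toggle (not DPDK code;
the flag values follow the VIRTIO 1.1 packed-ring layout):

#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
#define VRING_PACKED_DESC_F_USED	(1 << 15)
#define VRING_PACKED_DESC_F_AVAIL_USED \
	(VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)

int
main(void)
{
	uint16_t nentries = 4;	/* toy ring size */
	uint16_t avail_idx = 0;
	/* Initial wrap state: AVAIL set, USED clear. */
	uint16_t cached_flags = VRING_PACKED_DESC_F_AVAIL;
	int i;

	for (i = 0; i < 10; i++) {
		printf("desc %2u <- flags 0x%04x\n",
		       (unsigned int)avail_idx, (unsigned int)cached_flags);
		if (++avail_idx >= nentries) {
			avail_idx -= nentries;
			/* Flip both bits on wrap so descriptors written in
			 * the new lap are distinguishable from stale ones.
			 */
			cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
		}
	}
	return 0;
}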
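
Similarly, the last two hunks replace the CC_AVX512_SUPPORT guard with
__rte_weak stubs: the stubs always compile, and when an object with
strong definitions of the same symbols (the vectorized implementation)
is linked in, it overrides them; otherwise the stubs simply return 0
packets. A self-contained illustration of the underlying pattern (the
file and function names are hypothetical; __rte_weak expands to this
attribute):

/* weak_demo.c: built alone, fast_path() resolves to the weak stub
 * below. Linking another object that defines a non-weak fast_path()
 * overrides it, with no #ifdef needed in the caller.
 */
#include <stdio.h>

__attribute__((weak)) int
fast_path(void)
{
	return 0;	/* fallback when no optimized object is linked */
}

int
main(void)
{
	printf("fast_path() = %d\n", fast_path());
	return 0;
}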