net/virtio: add vectorized packed ring Rx
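
The packed-ring Tx helpers (descriptor cleanup, header offload, the generic
enqueue path) are removed from this file; their new home is not part of this
diff, presumably a shared header so the vectorized Rx path can reuse them.
The remaining enqueue paths also stop open-coding virtio_wmb() before a plain
flags store: descriptor flags are now published through
virtqueue_store_flags_packed(), while the dequeue side relies on
desc_is_used() providing its own acquire ordering. That helper is not shown
here; a minimal sketch of what it is assumed to do (store-release when the
device tolerates weak barriers, otherwise an I/O write barrier followed by a
plain store), reusing the driver's existing types, looks roughly like this:

	/* Illustrative sketch only, not part of this patch; the real helper
	 * is assumed to live in the shared virtqueue header.
	 */
	static inline void
	virtqueue_store_flags_packed(struct vring_packed_desc *dp,
				     uint16_t flags, uint8_t weak_barriers)
	{
		if (weak_barriers) {
			/* store-release: pairs with the load-acquire done
			 * when the other side checks the descriptor.
			 */
			__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
		} else {
			/* strong-barrier devices: order prior descriptor
			 * writes, then expose the flags.
			 */
			rte_cio_wmb();
			dp->flags = flags;
		}
	}

Rx queue setup additionally validates rx_free_thresh and rejects deferred
start, and a stub virtio_recv_pkts_packed_vec() is provided for builds
without AVX512 support.
---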
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 929aa4c..c50980c 100644
@@ -89,23 +89,6 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
        dp->next = VQ_RING_DESC_CHAIN_END;
 }
 
-static void
-vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
-{
-       struct vq_desc_extra *dxp;
-
-       dxp = &vq->vq_descx[id];
-       vq->vq_free_cnt += dxp->ndescs;
-
-       if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
-               vq->vq_desc_head_idx = id;
-       else
-               vq->vq_descx[vq->vq_desc_tail_idx].next = id;
-
-       vq->vq_desc_tail_idx = id;
-       dxp->next = VQ_RING_DESC_CHAIN_END;
-}
-
 void
 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 {
@@ -164,9 +147,11 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
 
        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx;
+               /* desc_is_used has a load-acquire or rte_cio_rmb inside
+                * and waits for the used descriptor in the virtqueue.
+                */
                if (!desc_is_used(&desc[used_idx], vq))
                        return i;
-               virtio_rmb(vq->hw->weak_barriers);
                len[i] = desc[used_idx].len;
                id = desc[used_idx].id;
                cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
@@ -262,126 +247,6 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
        return i;
 }
 
-#ifndef DEFAULT_TX_FREE_THRESH
-#define DEFAULT_TX_FREE_THRESH 32
-#endif
-
-static void
-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
-{
-       uint16_t used_idx, id, curr_id, free_cnt = 0;
-       uint16_t size = vq->vq_nentries;
-       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
-       struct vq_desc_extra *dxp;
-
-       used_idx = vq->vq_used_cons_idx;
-       while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
-               virtio_rmb(vq->hw->weak_barriers);
-               id = desc[used_idx].id;
-               do {
-                       curr_id = used_idx;
-                       dxp = &vq->vq_descx[used_idx];
-                       used_idx += dxp->ndescs;
-                       free_cnt += dxp->ndescs;
-                       num -= dxp->ndescs;
-                       if (used_idx >= size) {
-                               used_idx -= size;
-                               vq->vq_packed.used_wrap_counter ^= 1;
-                       }
-                       if (dxp->cookie != NULL) {
-                               rte_pktmbuf_free(dxp->cookie);
-                               dxp->cookie = NULL;
-                       }
-               } while (curr_id != id);
-       }
-       vq->vq_used_cons_idx = used_idx;
-       vq->vq_free_cnt += free_cnt;
-}
-
-static void
-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
-{
-       uint16_t used_idx, id;
-       uint16_t size = vq->vq_nentries;
-       struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
-       struct vq_desc_extra *dxp;
-
-       used_idx = vq->vq_used_cons_idx;
-       while (num-- && desc_is_used(&desc[used_idx], vq)) {
-               virtio_rmb(vq->hw->weak_barriers);
-               id = desc[used_idx].id;
-               dxp = &vq->vq_descx[id];
-               vq->vq_used_cons_idx += dxp->ndescs;
-               if (vq->vq_used_cons_idx >= size) {
-                       vq->vq_used_cons_idx -= size;
-                       vq->vq_packed.used_wrap_counter ^= 1;
-               }
-               vq_ring_free_id_packed(vq, id);
-               if (dxp->cookie != NULL) {
-                       rte_pktmbuf_free(dxp->cookie);
-                       dxp->cookie = NULL;
-               }
-               used_idx = vq->vq_used_cons_idx;
-       }
-}
-
-/* Cleanup from completed transmits. */
-static inline void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
-{
-       if (in_order)
-               virtio_xmit_cleanup_inorder_packed(vq, num);
-       else
-               virtio_xmit_cleanup_normal_packed(vq, num);
-}
-
-static void
-virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
-{
-       uint16_t i, used_idx, desc_idx;
-       for (i = 0; i < num; i++) {
-               struct vring_used_elem *uep;
-               struct vq_desc_extra *dxp;
-
-               used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-               uep = &vq->vq_split.ring.used->ring[used_idx];
-
-               desc_idx = (uint16_t) uep->id;
-               dxp = &vq->vq_descx[desc_idx];
-               vq->vq_used_cons_idx++;
-               vq_ring_free_chain(vq, desc_idx);
-
-               if (dxp->cookie != NULL) {
-                       rte_pktmbuf_free(dxp->cookie);
-                       dxp->cookie = NULL;
-               }
-       }
-}
-
-/* Cleanup from completed inorder transmits. */
-static __rte_always_inline void
-virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
-{
-       uint16_t i, idx = vq->vq_used_cons_idx;
-       int16_t free_cnt = 0;
-       struct vq_desc_extra *dxp = NULL;
-
-       if (unlikely(num == 0))
-               return;
-
-       for (i = 0; i < num; i++) {
-               dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
-               free_cnt += dxp->ndescs;
-               if (dxp->cookie != NULL) {
-                       rte_pktmbuf_free(dxp->cookie);
-                       dxp->cookie = NULL;
-               }
-       }
-
-       vq->vq_free_cnt += free_cnt;
-       vq->vq_used_cons_idx = idx;
-}
-
 static inline int
 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
                        struct rte_mbuf **cookies,
@@ -498,8 +363,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                vq->vq_desc_head_idx = dxp->next;
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
-               virtio_wmb(hw->weak_barriers);
-               start_dp[idx].flags = flags;
+
+               virtqueue_store_flags_packed(&start_dp[idx], flags,
+                                            hw->weak_barriers);
+
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
@@ -554,68 +421,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 }
 
 
-/* avoid write operation when necessary, to lessen cache issues */
-#define ASSIGN_UNLESS_EQUAL(var, val) do {     \
-       if ((var) != (val))                     \
-               (var) = (val);                  \
-} while (0)
-
-#define virtqueue_clear_net_hdr(_hdr) do {             \
-       ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0);     \
-       ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0);    \
-       ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0);          \
-       ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0);       \
-       ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0);       \
-       ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0);        \
-} while (0)
-
-static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-                       struct rte_mbuf *cookie,
-                       bool offload)
-{
-       if (offload) {
-               if (cookie->ol_flags & PKT_TX_TCP_SEG)
-                       cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
-               switch (cookie->ol_flags & PKT_TX_L4_MASK) {
-               case PKT_TX_UDP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_udp_hdr,
-                               dgram_cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               case PKT_TX_TCP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               default:
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-                       break;
-               }
 
-               /* TCP Segmentation Offload */
-               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-                               VIRTIO_NET_HDR_GSO_TCPV6 :
-                               VIRTIO_NET_HDR_GSO_TCPV4;
-                       hdr->gso_size = cookie->tso_segsz;
-                       hdr->hdr_len =
-                               cookie->l2_len +
-                               cookie->l3_len +
-                               cookie->l4_len;
-               } else {
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-               }
-       }
-}
 
 static inline void
 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
@@ -627,7 +433,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
        struct vring_desc *start_dp;
        struct virtio_net_hdr *hdr;
        uint16_t idx;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        uint16_t i = 0;
 
        idx = vq->vq_desc_head_idx;
@@ -640,8 +446,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                dxp->ndescs = 1;
                virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
-               hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
-                       cookies[i]->data_off - head_size;
+               hdr = rte_pktmbuf_mtod_offset(cookies[i],
+                               struct virtio_net_hdr *, -head_size);
 
                /* if offload disabled, hdr is not zeroed yet, do it now */
                if (!vq->hw->has_tx_offload)
@@ -649,7 +455,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                else
                        virtqueue_xmit_offload(hdr, cookies[i], true);
 
-               start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+               start_dp[idx].addr  =
+                       VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
                start_dp[idx].len   = cookies[i]->data_len + head_size;
                start_dp[idx].flags = 0;
 
@@ -673,7 +480,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        struct vring_packed_desc *dp;
        struct vq_desc_extra *dxp;
        uint16_t idx, id, flags;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;
 
        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
@@ -687,8 +494,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        flags = vq->vq_packed.cached_flags;
 
        /* prepend cannot fail, checked by caller */
-       hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-               cookie->data_off - head_size;
+       hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                     -head_size);
 
        /* if offload disabled, hdr is not zeroed yet, do it now */
        if (!vq->hw->has_tx_offload)
@@ -696,7 +503,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        else
                virtqueue_xmit_offload(hdr, cookie, true);
 
-       dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+       dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
        dp->len  = cookie->data_len + head_size;
        dp->id   = id;
 
@@ -713,103 +520,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }
 
-       virtio_wmb(vq->hw->weak_barriers);
-       dp->flags = flags;
-}
-
-static inline void
-virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-                             uint16_t needed, int can_push, int in_order)
-{
-       struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
-       struct vq_desc_extra *dxp;
-       struct virtqueue *vq = txvq->vq;
-       struct vring_packed_desc *start_dp, *head_dp;
-       uint16_t idx, id, head_idx, head_flags;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
-       struct virtio_net_hdr *hdr;
-       uint16_t prev;
-       bool prepend_header = false;
-
-       id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
-
-       dxp = &vq->vq_descx[id];
-       dxp->ndescs = needed;
-       dxp->cookie = cookie;
-
-       head_idx = vq->vq_avail_idx;
-       idx = head_idx;
-       prev = head_idx;
-       start_dp = vq->vq_packed.ring.desc;
-
-       head_dp = &vq->vq_packed.ring.desc[idx];
-       head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-       head_flags |= vq->vq_packed.cached_flags;
-
-       if (can_push) {
-               /* prepend cannot fail, checked by caller */
-               hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-                       cookie->data_off - head_size;
-               prepend_header = true;
-
-               /* if offload disabled, it is not zeroed below, do it now */
-               if (!vq->hw->has_tx_offload)
-                       virtqueue_clear_net_hdr(hdr);
-       } else {
-               /* setup first tx ring slot to point to header
-                * stored in reserved region.
-                */
-               start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
-                       RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
-               start_dp[idx].len   = vq->hw->vtnet_hdr_size;
-               hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
-               idx++;
-               if (idx >= vq->vq_nentries) {
-                       idx -= vq->vq_nentries;
-                       vq->vq_packed.cached_flags ^=
-                               VRING_PACKED_DESC_F_AVAIL_USED;
-               }
-       }
-
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
-
-       do {
-               uint16_t flags;
-
-               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
-               start_dp[idx].len  = cookie->data_len;
-               if (prepend_header) {
-                       start_dp[idx].len += head_size;
-                       prepend_header = false;
-               }
-
-               if (likely(idx != head_idx)) {
-                       flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-                       flags |= vq->vq_packed.cached_flags;
-                       start_dp[idx].flags = flags;
-               }
-               prev = idx;
-               idx++;
-               if (idx >= vq->vq_nentries) {
-                       idx -= vq->vq_nentries;
-                       vq->vq_packed.cached_flags ^=
-                               VRING_PACKED_DESC_F_AVAIL_USED;
-               }
-       } while ((cookie = cookie->next) != NULL);
-
-       start_dp[prev].id = id;
-
-       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-       vq->vq_avail_idx = idx;
-
-       if (!in_order) {
-               vq->vq_desc_head_idx = dxp->next;
-               if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-                       vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
-       }
-
-       virtio_wmb(vq->hw->weak_barriers);
-       head_dp->flags = head_flags;
+       virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
 }
 
 static inline void
@@ -823,7 +534,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        struct vring_desc *start_dp;
        uint16_t seg_num = cookie->nb_segs;
        uint16_t head_idx, idx;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        bool prepend_header = false;
        struct virtio_net_hdr *hdr;
 
@@ -840,8 +551,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
        if (can_push) {
                /* prepend cannot fail, checked by caller */
-               hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-                       cookie->data_off - head_size;
+               hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                             -head_size);
                prepend_header = true;
 
                /* if offload disabled, it is not zeroed below, do it now */
@@ -882,6 +593,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len   = cookie->data_len;
                if (prepend_header) {
+                       start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
                        prepend_header = false;
                }
@@ -919,16 +631,44 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
                        unsigned int socket_id __rte_unused,
-                       const struct rte_eth_rxconf *rx_conf __rte_unused,
+                       const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
        uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_rx *rxvq;
+       uint16_t rx_free_thresh;
 
        PMD_INIT_FUNC_TRACE();
 
+       if (rx_conf->rx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
+               return -EINVAL;
+       }
+
+       rx_free_thresh = rx_conf->rx_free_thresh;
+       if (rx_free_thresh == 0)
+               rx_free_thresh =
+                       RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
+
+       if (rx_free_thresh & 0x3) {
+               RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
+                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+                       rx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
+       }
+
+       if (rx_free_thresh >= vq->vq_nentries) {
+               RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
+                       "number of RX entries (%u)."
+                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+                       vq->vq_nentries,
+                       rx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
+       }
+       vq->vq_free_thresh = rx_free_thresh;
+
        if (nb_desc == 0 || nb_desc > vq->vq_nentries)
                nb_desc = vq->vq_nentries;
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -951,13 +691,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
        struct rte_mbuf *m;
        uint16_t desc_idx;
        int error, nbufs, i;
+       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        PMD_INIT_FUNC_TRACE();
 
        /* Allocate blank mbufs for the each rx descriptor */
        nbufs = 0;
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
                        vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -975,12 +716,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        &rxvq->fake_mbuf;
        }
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxvq);
                        nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
                }
-       } else if (hw->use_inorder_rx) {
+       } else if (!vtpci_packed_queue(vq->hw) && in_order) {
                if ((!virtqueue_full(vq))) {
                        uint16_t free_cnt = vq->vq_free_cnt;
                        struct rte_mbuf *pkts[free_cnt];
@@ -1052,6 +793,11 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
+       if (tx_conf->tx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+               return -EINVAL;
+       }
+
        if (nb_desc == 0 || nb_desc > vq->vq_nentries)
                nb_desc = vq->vq_nentries;
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -1065,7 +811,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
 
        if (tx_free_thresh >= (vq->vq_nentries - 3)) {
-               RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+               PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
                        "number of TX entries minus 3 (%u)."
                        " (tx_free_thresh=%u port=%u queue=%u)\n",
                        vq->vq_nentries - 3,
@@ -1090,7 +836,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
        PMD_INIT_FUNC_TRACE();
 
        if (!vtpci_packed_queue(hw)) {
-               if (hw->use_inorder_tx)
+               if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
                        vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
        }
 
@@ -1113,7 +859,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
                error = virtqueue_enqueue_recv_refill(vq, &m, 1);
 
        if (unlikely(error)) {
-               RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+               PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
 }
@@ -1125,7 +871,7 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 
        error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
        if (unlikely(error)) {
-               RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+               PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
 }
@@ -1202,7 +948,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
        return 0;
 }
 
-#define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
@@ -2003,7 +1748,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
-       bool in_order = hw->use_inorder_tx;
+       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
@@ -2285,3 +2030,13 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 
        return nb_tx;
 }
+
+#ifndef CC_AVX512_SUPPORT
+uint16_t
+virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
+                           struct rte_mbuf **rx_pkts __rte_unused,
+                           uint16_t nb_pkts __rte_unused)
+{
+       return 0;
+}
+#endif /* ifndef CC_AVX512_SUPPORT */
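
A note on the ordering comment added in virtqueue_dequeue_burst_rx_packed()
above: it assumes desc_is_used() performs the acquire side itself, which is
why the explicit virtio_rmb() after the check is dropped. A minimal sketch of
that check, under the assumption that the flags are read with a load-acquire
(or followed by rte_cio_rmb() on strong-barrier devices) and the AVAIL/USED
bits are compared against the ring's wrap counter:

	/* Illustrative sketch only, not part of this patch. */
	static inline uint16_t
	virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
				     uint8_t weak_barriers)
	{
		uint16_t flags;

		if (weak_barriers) {
			/* load-acquire: pairs with the store-release that
			 * published the descriptor.
			 */
			flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
		} else {
			flags = dp->flags;
			rte_cio_rmb();
		}

		return flags;
	}

	static inline int
	desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
	{
		uint16_t flags, used, avail;

		flags = virtqueue_fetch_flags_packed(desc,
						     vq->hw->weak_barriers);
		used  = !!(flags & VRING_PACKED_DESC_F_USED);
		avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

		/* a descriptor is used once AVAIL == USED == wrap counter */
		return avail == used &&
			used == vq->vq_packed.used_wrap_counter;
	}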