X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=f915b8a2c993a4131b2dfa696cbe01950c1c3b46;hb=28b0514504cf0f71f822f0aed5aacc83eb8ada12;hp=b5fc4ecbe19286cd3731cbf04d4ee34cd3074168;hpb=be048a1aaa7276cc99a5a6a30e7825cfcb523112;p=dpdk.git diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index b5fc4ecbe1..f915b8a2c9 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -45,7 +45,7 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset) struct virtnet_rx *rxvq = rxq; struct virtqueue *vq = rxvq->vq; - return VIRTQUEUE_NUSED(vq) >= offset; + return virtqueue_nused(vq) >= offset; } void @@ -89,23 +89,6 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) dp->next = VQ_RING_DESC_CHAIN_END; } -static void -vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) -{ - struct vq_desc_extra *dxp; - - dxp = &vq->vq_descx[id]; - vq->vq_free_cnt += dxp->ndescs; - - if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) - vq->vq_desc_head_idx = id; - else - vq->vq_descx[vq->vq_desc_tail_idx].next = id; - - vq->vq_desc_tail_idx = id; - dxp->next = VQ_RING_DESC_CHAIN_END; -} - void virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) { @@ -264,130 +247,6 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq, return i; } -#ifndef DEFAULT_TX_FREE_THRESH -#define DEFAULT_TX_FREE_THRESH 32 -#endif - -static void -virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num) -{ - uint16_t used_idx, id, curr_id, free_cnt = 0; - uint16_t size = vq->vq_nentries; - struct vring_packed_desc *desc = vq->vq_packed.ring.desc; - struct vq_desc_extra *dxp; - - used_idx = vq->vq_used_cons_idx; - /* desc_is_used has a load-acquire or rte_cio_rmb inside - * and wait for used desc in virtqueue. - */ - while (num > 0 && desc_is_used(&desc[used_idx], vq)) { - id = desc[used_idx].id; - do { - curr_id = used_idx; - dxp = &vq->vq_descx[used_idx]; - used_idx += dxp->ndescs; - free_cnt += dxp->ndescs; - num -= dxp->ndescs; - if (used_idx >= size) { - used_idx -= size; - vq->vq_packed.used_wrap_counter ^= 1; - } - if (dxp->cookie != NULL) { - rte_pktmbuf_free(dxp->cookie); - dxp->cookie = NULL; - } - } while (curr_id != id); - } - vq->vq_used_cons_idx = used_idx; - vq->vq_free_cnt += free_cnt; -} - -static void -virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num) -{ - uint16_t used_idx, id; - uint16_t size = vq->vq_nentries; - struct vring_packed_desc *desc = vq->vq_packed.ring.desc; - struct vq_desc_extra *dxp; - - used_idx = vq->vq_used_cons_idx; - /* desc_is_used has a load-acquire or rte_cio_rmb inside - * and wait for used desc in virtqueue. - */ - while (num-- && desc_is_used(&desc[used_idx], vq)) { - id = desc[used_idx].id; - dxp = &vq->vq_descx[id]; - vq->vq_used_cons_idx += dxp->ndescs; - if (vq->vq_used_cons_idx >= size) { - vq->vq_used_cons_idx -= size; - vq->vq_packed.used_wrap_counter ^= 1; - } - vq_ring_free_id_packed(vq, id); - if (dxp->cookie != NULL) { - rte_pktmbuf_free(dxp->cookie); - dxp->cookie = NULL; - } - used_idx = vq->vq_used_cons_idx; - } -} - -/* Cleanup from completed transmits. 
*/ -static inline void -virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order) -{ - if (in_order) - virtio_xmit_cleanup_inorder_packed(vq, num); - else - virtio_xmit_cleanup_normal_packed(vq, num); -} - -static void -virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) -{ - uint16_t i, used_idx, desc_idx; - for (i = 0; i < num; i++) { - struct vring_used_elem *uep; - struct vq_desc_extra *dxp; - - used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); - uep = &vq->vq_split.ring.used->ring[used_idx]; - - desc_idx = (uint16_t) uep->id; - dxp = &vq->vq_descx[desc_idx]; - vq->vq_used_cons_idx++; - vq_ring_free_chain(vq, desc_idx); - - if (dxp->cookie != NULL) { - rte_pktmbuf_free(dxp->cookie); - dxp->cookie = NULL; - } - } -} - -/* Cleanup from completed inorder transmits. */ -static __rte_always_inline void -virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num) -{ - uint16_t i, idx = vq->vq_used_cons_idx; - int16_t free_cnt = 0; - struct vq_desc_extra *dxp = NULL; - - if (unlikely(num == 0)) - return; - - for (i = 0; i < num; i++) { - dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)]; - free_cnt += dxp->ndescs; - if (dxp->cookie != NULL) { - rte_pktmbuf_free(dxp->cookie); - dxp->cookie = NULL; - } - } - - vq->vq_free_cnt += free_cnt; - vq->vq_used_cons_idx = idx; -} - static inline int virtqueue_enqueue_refill_inorder(struct virtqueue *vq, struct rte_mbuf **cookies, @@ -562,68 +421,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m) } -/* avoid write operation when necessary, to lessen cache issues */ -#define ASSIGN_UNLESS_EQUAL(var, val) do { \ - if ((var) != (val)) \ - (var) = (val); \ -} while (0) - -#define virtqueue_clear_net_hdr(_hdr) do { \ - ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0); \ - ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0); \ - ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0); \ - ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0); \ - ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0); \ - ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0); \ -} while (0) - -static inline void -virtqueue_xmit_offload(struct virtio_net_hdr *hdr, - struct rte_mbuf *cookie, - bool offload) -{ - if (offload) { - if (cookie->ol_flags & PKT_TX_TCP_SEG) - cookie->ol_flags |= PKT_TX_TCP_CKSUM; - - switch (cookie->ol_flags & PKT_TX_L4_MASK) { - case PKT_TX_UDP_CKSUM: - hdr->csum_start = cookie->l2_len + cookie->l3_len; - hdr->csum_offset = offsetof(struct rte_udp_hdr, - dgram_cksum); - hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - break; - - case PKT_TX_TCP_CKSUM: - hdr->csum_start = cookie->l2_len + cookie->l3_len; - hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum); - hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - break; - - default: - ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0); - ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0); - ASSIGN_UNLESS_EQUAL(hdr->flags, 0); - break; - } - /* TCP Segmentation Offload */ - if (cookie->ol_flags & PKT_TX_TCP_SEG) { - hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ? 
- VIRTIO_NET_HDR_GSO_TCPV6 : - VIRTIO_NET_HDR_GSO_TCPV4; - hdr->gso_size = cookie->tso_segsz; - hdr->hdr_len = - cookie->l2_len + - cookie->l3_len + - cookie->l4_len; - } else { - ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0); - ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0); - ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0); - } - } -} static inline void virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, @@ -635,7 +433,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, struct vring_desc *start_dp; struct virtio_net_hdr *hdr; uint16_t idx; - uint16_t head_size = vq->hw->vtnet_hdr_size; + int16_t head_size = vq->hw->vtnet_hdr_size; uint16_t i = 0; idx = vq->vq_desc_head_idx; @@ -648,8 +446,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, dxp->ndescs = 1; virtio_update_packet_stats(&txvq->stats, cookies[i]); - hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr + - cookies[i]->data_off - head_size; + hdr = rte_pktmbuf_mtod_offset(cookies[i], + struct virtio_net_hdr *, -head_size); /* if offload disabled, hdr is not zeroed yet, do it now */ if (!vq->hw->has_tx_offload) @@ -682,7 +480,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq, struct vring_packed_desc *dp; struct vq_desc_extra *dxp; uint16_t idx, id, flags; - uint16_t head_size = vq->hw->vtnet_hdr_size; + int16_t head_size = vq->hw->vtnet_hdr_size; struct virtio_net_hdr *hdr; id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; @@ -696,8 +494,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq, flags = vq->vq_packed.cached_flags; /* prepend cannot fail, checked by caller */ - hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr + - cookie->data_off - head_size; + hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *, + -head_size); /* if offload disabled, hdr is not zeroed yet, do it now */ if (!vq->hw->has_tx_offload) @@ -725,102 +523,6 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq, virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers); } -static inline void -virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, - uint16_t needed, int can_push, int in_order) -{ - struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; - struct vq_desc_extra *dxp; - struct virtqueue *vq = txvq->vq; - struct vring_packed_desc *start_dp, *head_dp; - uint16_t idx, id, head_idx, head_flags; - uint16_t head_size = vq->hw->vtnet_hdr_size; - struct virtio_net_hdr *hdr; - uint16_t prev; - bool prepend_header = false; - - id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; - - dxp = &vq->vq_descx[id]; - dxp->ndescs = needed; - dxp->cookie = cookie; - - head_idx = vq->vq_avail_idx; - idx = head_idx; - prev = head_idx; - start_dp = vq->vq_packed.ring.desc; - - head_dp = &vq->vq_packed.ring.desc[idx]; - head_flags = cookie->next ? VRING_DESC_F_NEXT : 0; - head_flags |= vq->vq_packed.cached_flags; - - if (can_push) { - /* prepend cannot fail, checked by caller */ - hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr + - cookie->data_off - head_size; - prepend_header = true; - - /* if offload disabled, it is not zeroed below, do it now */ - if (!vq->hw->has_tx_offload) - virtqueue_clear_net_hdr(hdr); - } else { - /* setup first tx ring slot to point to header - * stored in reserved region. 
- */ - start_dp[idx].addr = txvq->virtio_net_hdr_mem + - RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); - start_dp[idx].len = vq->hw->vtnet_hdr_size; - hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; - idx++; - if (idx >= vq->vq_nentries) { - idx -= vq->vq_nentries; - vq->vq_packed.cached_flags ^= - VRING_PACKED_DESC_F_AVAIL_USED; - } - } - - virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); - - do { - uint16_t flags; - - start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); - start_dp[idx].len = cookie->data_len; - if (prepend_header) { - start_dp[idx].addr -= head_size; - start_dp[idx].len += head_size; - prepend_header = false; - } - - if (likely(idx != head_idx)) { - flags = cookie->next ? VRING_DESC_F_NEXT : 0; - flags |= vq->vq_packed.cached_flags; - start_dp[idx].flags = flags; - } - prev = idx; - idx++; - if (idx >= vq->vq_nentries) { - idx -= vq->vq_nentries; - vq->vq_packed.cached_flags ^= - VRING_PACKED_DESC_F_AVAIL_USED; - } - } while ((cookie = cookie->next) != NULL); - - start_dp[prev].id = id; - - vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); - vq->vq_avail_idx = idx; - - if (!in_order) { - vq->vq_desc_head_idx = dxp->next; - if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) - vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; - } - - virtqueue_store_flags_packed(head_dp, head_flags, - vq->hw->weak_barriers); -} - static inline void virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, uint16_t needed, int use_indirect, int can_push, @@ -832,7 +534,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, struct vring_desc *start_dp; uint16_t seg_num = cookie->nb_segs; uint16_t head_idx, idx; - uint16_t head_size = vq->hw->vtnet_hdr_size; + int16_t head_size = vq->hw->vtnet_hdr_size; bool prepend_header = false; struct virtio_net_hdr *hdr; @@ -849,8 +551,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, if (can_push) { /* prepend cannot fail, checked by caller */ - hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr + - cookie->data_off - head_size; + hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *, + -head_size); prepend_header = true; /* if offload disabled, it is not zeroed below, do it now */ @@ -929,16 +631,44 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id __rte_unused, - const struct rte_eth_rxconf *rx_conf __rte_unused, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; struct virtio_hw *hw = dev->data->dev_private; struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; struct virtnet_rx *rxvq; + uint16_t rx_free_thresh; PMD_INIT_FUNC_TRACE(); + if (rx_conf->rx_deferred_start) { + PMD_INIT_LOG(ERR, "Rx deferred start is not supported"); + return -EINVAL; + } + + rx_free_thresh = rx_conf->rx_free_thresh; + if (rx_free_thresh == 0) + rx_free_thresh = + RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH); + + if (rx_free_thresh & 0x3) { + RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four." + " (rx_free_thresh=%u port=%u queue=%u)\n", + rx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + if (rx_free_thresh >= vq->vq_nentries) { + RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the " + "number of RX entries (%u)." 
+ " (rx_free_thresh=%u port=%u queue=%u)\n", + vq->vq_nentries, + rx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + vq->vq_free_thresh = rx_free_thresh; + if (nb_desc == 0 || nb_desc > vq->vq_nentries) nb_desc = vq->vq_nentries; vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); @@ -961,13 +691,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) struct rte_mbuf *m; uint16_t desc_idx; int error, nbufs, i; + bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER); PMD_INIT_FUNC_TRACE(); /* Allocate blank mbufs for the each rx descriptor */ nbufs = 0; - if (hw->use_simple_rx) { + if (hw->use_vec_rx && !vtpci_packed_queue(hw)) { for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) { vq->vq_split.ring.avail->ring[desc_idx] = desc_idx; @@ -985,12 +716,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) &rxvq->fake_mbuf; } - if (hw->use_simple_rx) { + if (hw->use_vec_rx && !vtpci_packed_queue(hw)) { while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { virtio_rxq_rearm_vec(rxvq); nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH; } - } else if (hw->use_inorder_rx) { + } else if (!vtpci_packed_queue(vq->hw) && in_order) { if ((!virtqueue_full(vq))) { uint16_t free_cnt = vq->vq_free_cnt; struct rte_mbuf *pkts[free_cnt]; @@ -1062,6 +793,11 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); + if (tx_conf->tx_deferred_start) { + PMD_INIT_LOG(ERR, "Tx deferred start is not supported"); + return -EINVAL; + } + if (nb_desc == 0 || nb_desc > vq->vq_nentries) nb_desc = vq->vq_nentries; vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); @@ -1075,7 +811,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH); if (tx_free_thresh >= (vq->vq_nentries - 3)) { - RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the " + PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the " "number of TX entries minus 3 (%u)." " (tx_free_thresh=%u port=%u queue=%u)\n", vq->vq_nentries - 3, @@ -1100,7 +836,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (!vtpci_packed_queue(hw)) { - if (hw->use_inorder_tx) + if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0; } @@ -1123,7 +859,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) error = virtqueue_enqueue_recv_refill(vq, &m, 1); if (unlikely(error)) { - RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); + PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf"); rte_pktmbuf_free(m); } } @@ -1135,7 +871,7 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m) error = virtqueue_enqueue_refill_inorder(vq, &m, 1); if (unlikely(error)) { - RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); + PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf"); rte_pktmbuf_free(m); } } @@ -1212,7 +948,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) return 0; } -#define VIRTIO_MBUF_BURST_SZ 64 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) @@ -1233,9 +968,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (unlikely(hw->started == 0)) return nb_rx; - nb_used = VIRTQUEUE_NUSED(vq); - - virtio_rmb(hw->weak_barriers); + nb_used = virtqueue_nused(vq); num = likely(nb_used <= nb_pkts) ? 
nb_used : nb_pkts; if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) @@ -1448,12 +1181,10 @@ virtio_recv_pkts_inorder(void *rx_queue, if (unlikely(hw->started == 0)) return nb_rx; - nb_used = VIRTQUEUE_NUSED(vq); + nb_used = virtqueue_nused(vq); nb_used = RTE_MIN(nb_used, nb_pkts); nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ); - virtio_rmb(hw->weak_barriers); - PMD_RX_LOG(DEBUG, "used:%d", nb_used); nb_enqueued = 0; @@ -1542,8 +1273,7 @@ virtio_recv_pkts_inorder(void *rx_queue, uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, VIRTIO_MBUF_BURST_SZ); - if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { - virtio_rmb(hw->weak_barriers); + if (likely(virtqueue_nused(vq) >= rcv_cnt)) { num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, rcv_cnt); uint16_t extra_idx = 0; @@ -1634,9 +1364,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, if (unlikely(hw->started == 0)) return nb_rx; - nb_used = VIRTQUEUE_NUSED(vq); - - virtio_rmb(hw->weak_barriers); + nb_used = virtqueue_nused(vq); PMD_RX_LOG(DEBUG, "used:%d", nb_used); @@ -1724,8 +1452,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, VIRTIO_MBUF_BURST_SZ); - if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { - virtio_rmb(hw->weak_barriers); + if (likely(virtqueue_nused(vq) >= rcv_cnt)) { num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, rcv_cnt); uint16_t extra_idx = 0; @@ -2013,7 +1740,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, struct virtio_hw *hw = vq->hw; uint16_t hdr_size = hw->vtnet_hdr_size; uint16_t nb_tx = 0; - bool in_order = hw->use_inorder_tx; + bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER); if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) return nb_tx; @@ -2098,9 +1825,9 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_pkts; PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); - nb_used = VIRTQUEUE_NUSED(vq); - virtio_rmb(hw->weak_barriers); + nb_used = virtqueue_nused(vq); + if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) virtio_xmit_cleanup(vq, nb_used); @@ -2132,8 +1859,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Positive value indicates it need free vring descriptors */ if (unlikely(need > 0)) { - nb_used = VIRTQUEUE_NUSED(vq); - virtio_rmb(hw->weak_barriers); + nb_used = virtqueue_nused(vq); + need = RTE_MIN(need, (int)nb_used); virtio_xmit_cleanup(vq, need); @@ -2170,11 +1897,9 @@ static __rte_always_inline int virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need) { uint16_t nb_used, nb_clean, nb_descs; - struct virtio_hw *hw = vq->hw; nb_descs = vq->vq_free_cnt + need; - nb_used = VIRTQUEUE_NUSED(vq); - virtio_rmb(hw->weak_barriers); + nb_used = virtqueue_nused(vq); nb_clean = RTE_MIN(need, (int)nb_used); virtio_xmit_cleanup_inorder(vq, nb_clean); @@ -2203,9 +1928,8 @@ virtio_xmit_pkts_inorder(void *tx_queue, VIRTQUEUE_DUMP(vq); PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); - nb_used = VIRTQUEUE_NUSED(vq); + nb_used = virtqueue_nused(vq); - virtio_rmb(hw->weak_barriers); if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) virtio_xmit_cleanup_inorder(vq, nb_used); @@ -2295,3 +2019,21 @@ virtio_xmit_pkts_inorder(void *tx_queue, return nb_tx; } + +#ifndef CC_AVX512_SUPPORT +uint16_t +virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused, + struct rte_mbuf **rx_pkts __rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} + +uint16_t +virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused, + struct rte_mbuf **tx_pkts 
__rte_unused, + uint16_t nb_pkts __rte_unused) +{ + return 0; +} +#endif /* ifndef CC_AVX512_SUPPORT */
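
A minimal sketch (not part of the patch above) of the header-prepend pattern this diff switches to: rte_pktmbuf_mtod_offset() with a negated header size instead of the open-coded buf_addr/data_off arithmetic it removes. The helper name pkt_hdr_in_headroom and the explicit headroom check are illustrative assumptions, not code from the driver.

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * Hypothetical helper: return a pointer to hdr_size bytes of mbuf
 * headroom immediately before the packet data, or NULL if the headroom
 * is too small. hdr_size is kept signed here, matching the int16_t
 * head_size this patch introduces for use with a negated offset.
 */
static inline void *
pkt_hdr_in_headroom(struct rte_mbuf *m, int16_t hdr_size)
{
	if (hdr_size < 0 || rte_pktmbuf_headroom(m) < (uint16_t)hdr_size)
		return NULL;

	return rte_pktmbuf_mtod_offset(m, void *, -hdr_size);
}

In the driver itself the equivalent lookup is done only after the enqueue paths have verified that the header fits (the "prepend cannot fail, checked by caller" comments above), so no per-packet headroom check is needed there.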