/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>

#include <rte_net.h>
#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

#ifdef CC_AVX512_SUPPORT
#include "virtio_rxtx_packed_avx.h"
#elif defined(RTE_ARCH_ARM)
#include "virtio_rxtx_packed_neon.h"
#endif
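
/*
 * Vectorized Tx burst for the packed virtqueue: descriptors are filled in
 * batches of PACKED_BATCH_SIZE when possible, one at a time otherwise.
 */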
uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_tx = 0;
	uint16_t remained;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
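
	/* Reclaim completed descriptors once at least vq_free_thresh entries are in use. */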
	if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
		virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);

	remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);
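
	/* Prefer the batched vector enqueue; fall back to single descriptors. */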
	while (remained) {
		if (remained >= PACKED_BATCH_SIZE) {
			if (!virtqueue_enqueue_batch_packed_vec(txvq,
						&tx_pkts[nb_tx])) {
				nb_tx += PACKED_BATCH_SIZE;
				remained -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_enqueue_single_packed_vec(txvq,
					tx_pkts[nb_tx])) {
			nb_tx++;
			remained--;
			continue;
		}
		break;
	}

	txvq->stats.packets += nb_tx;
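
	/* Kick the backend only if it requested a notification. */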
	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}
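
/*
 * Vectorized Rx burst for the packed virtqueue: dequeue in batches of
 * PACKED_BATCH_SIZE when possible, then refill the ring with fresh mbufs.
 */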
uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue,
			    struct rte_mbuf **rx_pkts,
			    uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t num, nb_rx = 0;
	uint32_t nb_enqueued = 0;
	uint16_t free_cnt = vq->vq_free_thresh;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
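	/* Trim the burst so the used index ends on a PACKED_BATCH_SIZE boundary. */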
	if (likely(num > PACKED_BATCH_SIZE))
		num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);
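
	/* Prefer the batched vector dequeue; fall back to single descriptors. */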
	while (num) {
		if (num >= PACKED_BATCH_SIZE) {
			if (!virtqueue_dequeue_batch_packed_vec(rxvq,
						&rx_pkts[nb_rx])) {
				nb_rx += PACKED_BATCH_SIZE;
				num -= PACKED_BATCH_SIZE;
				continue;
			}
		}
		if (!virtqueue_dequeue_single_packed_vec(rxvq,
					&rx_pkts[nb_rx])) {
			nb_rx++;
			num--;
			continue;
		}
		break;
	}

	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	rxvq->stats.packets += nb_rx;
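
	/* Refill the ring with fresh mbufs once enough descriptors are free. */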
	if (likely(vq->vq_free_cnt >= free_cnt)) {
		struct rte_mbuf *new_pkts[free_cnt];
		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			virtio_recv_refill_packed_vec(rxvq, new_pkts,
					free_cnt);
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}
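
	/* Notify the backend only when new buffers were added and it asked for a kick. */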
	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}