1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2020 Intel Corporation
13 #include "virtio_logs.h"
14 #include "virtio_ethdev.h"
15 #include "virtio_pci.h"
16 #include "virtio_rxtx_packed.h"
17 #include "virtqueue.h"
19 #ifdef CC_AVX512_SUPPORT
20 #include "virtio_rxtx_packed_avx.h"
/*
 * virtio_xmit_pkts_packed_vec - vectorized TX burst for packed virtqueues.
 *
 * NOTE(review): this listing is elided (several source lines are missing
 * between the statements shown, e.g. the enqueue loop header, the
 * continue/break arms and the final return), so the comments below
 * describe only the code that is visible here.
 */
24 virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Resolve queue, virtqueue and device-HW handles from the opaque arg. */
27 struct virtnet_tx *txvq = tx_queue;
28 struct virtqueue *vq = txvq->vq;
29 struct virtio_hw *hw = vq->hw;
/* Refuse TX while the port is stopped, except when the burst is the
 * driver's own injected packets (tx_pkts == hw->inject_pkts) —
 * presumably control traffic that must go out regardless; confirm
 * against hw->inject_pkts users. */
33 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
/* Empty burst: nothing to do. */
36 if (unlikely(nb_pkts < 1))
39 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
/* Reclaim completed descriptors once the free count drops to the
 * cleanup threshold (in-order completion variant). */
41 if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
42 virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);
/* Never try to enqueue more packets than free descriptors. */
44 remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);
/* Fast path: enqueue PACKED_BATCH_SIZE mbufs at once with the
 * vectorized helper; on success advance both counters by a batch. */
47 if (remained >= PACKED_BATCH_SIZE) {
48 if (!virtqueue_enqueue_batch_packed_vec(txvq,
50 nb_tx += PACKED_BATCH_SIZE;
51 remained -= PACKED_BATCH_SIZE;
/* Slow path: fall back to enqueueing a single packet. */
55 if (!virtqueue_enqueue_single_packed_vec(txvq,
/* Account transmitted packets in the per-queue stats. */
64 txvq->stats.packets += nb_tx;
/* Kick the backend only when it has requested notifications. */
67 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
69 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
/*
 * virtio_recv_pkts_packed_vec - vectorized RX burst for packed virtqueues.
 *
 * NOTE(review): this listing is elided (the dequeue loop header, the
 * continue/break arms, several closing braces and the final return are
 * not visible), so the comments below cover only the code shown.
 */
77 virtio_recv_pkts_packed_vec(void *rx_queue,
78 struct rte_mbuf **rx_pkts,
/* Resolve queue, virtqueue and device-HW handles from the opaque arg. */
81 struct virtnet_rx *rxvq = rx_queue;
82 struct virtqueue *vq = rxvq->vq;
83 struct virtio_hw *hw = vq->hw;
84 uint16_t num, nb_rx = 0;
85 uint32_t nb_enqueued = 0;
/* Refill threshold: replenish RX descriptors in chunks of this size. */
86 uint16_t free_cnt = vq->vq_free_thresh;
/* No RX while the port is stopped. */
88 if (unlikely(hw->started == 0))
/* Cap the burst, then trim it so the used-ring consumer index stays
 * aligned to PACKED_BATCH_SIZE for the batch dequeue fast path. */
91 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
92 if (likely(num > PACKED_BATCH_SIZE))
93 num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);
/* Fast path: pull PACKED_BATCH_SIZE packets at once. */
96 if (!virtqueue_dequeue_batch_packed_vec(rxvq,
98 nb_rx += PACKED_BATCH_SIZE;
99 num -= PACKED_BATCH_SIZE;
/* Slow path: pull a single packet. */
102 if (!virtqueue_dequeue_single_packed_vec(rxvq,
111 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
/* Account received packets in the per-queue stats. */
113 rxvq->stats.packets += nb_rx;
/* Refill: once at least free_cnt descriptors are free, bulk-allocate
 * that many mbufs (VLA sized by the threshold) and post them. */
115 if (likely(vq->vq_free_cnt >= free_cnt)) {
116 struct rte_mbuf *new_pkts[free_cnt];
117 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
119 virtio_recv_refill_packed_vec(rxvq, new_pkts,
121 nb_enqueued += free_cnt;
/* Allocation failed: record the whole chunk as alloc failures on
 * the owning ethdev (no partial refill is attempted). */
123 struct rte_eth_dev *dev =
124 &rte_eth_devices[rxvq->port_id];
125 dev->data->rx_mbuf_alloc_failed += free_cnt;
/* Kick the backend only if descriptors were posted and the device
 * has notifications enabled. */
129 if (likely(nb_enqueued)) {
130 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
131 virtqueue_notify(vq);
132 PMD_RX_LOG(DEBUG, "Notified");