net/virtio: add election for packed vector NEON path
dpdk.git: drivers/net/virtio/virtio_rxtx_packed.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_net.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_rxtx_packed.h"
#include "virtqueue.h"

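/*
 * Compile-time election of the vectorized helpers: prefer the AVX512
 * implementation when the compiler supports it, otherwise fall back to
 * the NEON implementation on Arm builds. Both headers provide the
 * virtqueue_*_packed_vec() helpers used below.
 */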
#ifdef CC_AVX512_SUPPORT
#include "virtio_rxtx_packed_avx.h"
#elif defined(RTE_ARCH_ARM)
#include "virtio_rxtx_packed_neon.h"
#endif

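/*
 * Vectorized transmit for the packed virtqueue: enqueue packets in
 * batches of PACKED_BATCH_SIZE where possible and fall back to
 * single-descriptor enqueue for the remainder.
 */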
uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts)
{
        struct virtnet_tx *txvq = tx_queue;
        struct virtqueue *vq = txvq->vq;
        struct virtio_hw *hw = vq->hw;
        uint16_t nb_tx = 0;
        uint16_t remained;

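        /*
         * Do not transmit on a stopped port, except for packets the
         * driver itself injects through hw->inject_pkts.
         */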
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;

        if (unlikely(nb_pkts < 1))
                return nb_pkts;

        PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

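        /*
         * Reclaim completed descriptors once the number of descriptors
         * in flight reaches vq_free_thresh.
         */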
        if (vq->vq_free_cnt <= vq->vq_nentries - vq->vq_free_thresh)
                virtio_xmit_cleanup_inorder_packed(vq, vq->vq_free_thresh);

        remained = RTE_MIN(nb_pkts, vq->vq_free_cnt);

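        /*
         * Prefer the SIMD batch enqueue while at least PACKED_BATCH_SIZE
         * packets remain, otherwise enqueue one packet at a time; stop on
         * the first enqueue failure.
         */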
        while (remained) {
                if (remained >= PACKED_BATCH_SIZE) {
                        if (!virtqueue_enqueue_batch_packed_vec(txvq,
                                                &tx_pkts[nb_tx])) {
                                nb_tx += PACKED_BATCH_SIZE;
                                remained -= PACKED_BATCH_SIZE;
                                continue;
                        }
                }
                if (!virtqueue_enqueue_single_packed_vec(txvq,
                                        tx_pkts[nb_tx])) {
                        nb_tx++;
                        remained--;
                        continue;
                }
                break;
        }

        txvq->stats.packets += nb_tx;

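        /*
         * Kick the backend only when it requested a notification for
         * this update.
         */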
        if (likely(nb_tx)) {
                if (unlikely(virtqueue_kick_prepare_packed(vq))) {
                        virtqueue_notify(vq);
                        PMD_TX_LOG(DEBUG, "Notified backend after xmit");
                }
        }

        return nb_tx;
}

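/*
 * Vectorized receive for the packed virtqueue. Mirrors the transmit
 * side: dequeue in batches of PACKED_BATCH_SIZE where possible, fall
 * back to single-descriptor dequeue, then refill the ring in bulk.
 */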
uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue,
                            struct rte_mbuf **rx_pkts,
                            uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = rxvq->vq;
        struct virtio_hw *hw = vq->hw;
        uint16_t num, nb_rx = 0;
        uint32_t nb_enqueued = 0;
        uint16_t free_cnt = vq->vq_free_thresh;

        if (unlikely(hw->started == 0))
                return nb_rx;

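        /*
         * Trim the burst so that the used index ends up on a
         * PACKED_BATCH_SIZE boundary, keeping batch dequeues aligned.
         */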
        num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
        if (likely(num > PACKED_BATCH_SIZE))
                num = num - ((vq->vq_used_cons_idx + num) % PACKED_BATCH_SIZE);

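        /*
         * As on the transmit side, try the SIMD batch dequeue first and
         * fall back to single-descriptor dequeue; stop once no more used
         * descriptors are available.
         */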
        while (num) {
                if (!virtqueue_dequeue_batch_packed_vec(rxvq,
                                        &rx_pkts[nb_rx])) {
                        nb_rx += PACKED_BATCH_SIZE;
                        num -= PACKED_BATCH_SIZE;
                        continue;
                }
                if (!virtqueue_dequeue_single_packed_vec(rxvq,
                                        &rx_pkts[nb_rx])) {
                        nb_rx++;
                        num--;
                        continue;
                }
                break;
        }

        PMD_RX_LOG(DEBUG, "dequeue:%d", nb_rx);

        rxvq->stats.packets += nb_rx;

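        /*
         * Refill the ring in bulk once vq_free_thresh descriptors are
         * free; on allocation failure, charge the whole bulk to the
         * port's rx_mbuf_alloc_failed counter.
         */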
        if (likely(vq->vq_free_cnt >= free_cnt)) {
                struct rte_mbuf *new_pkts[free_cnt];
                if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
                                                free_cnt) == 0)) {
                        virtio_recv_refill_packed_vec(rxvq, new_pkts,
                                        free_cnt);
                        nb_enqueued += free_cnt;
                } else {
                        struct rte_eth_dev *dev =
                                &rte_eth_devices[rxvq->port_id];
                        dev->data->rx_mbuf_alloc_failed += free_cnt;
                }
        }

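        /*
         * Notify the backend only when new buffers were added and it
         * asked to be kicked.
         */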
        if (likely(nb_enqueued)) {
                if (unlikely(virtqueue_kick_prepare_packed(vq))) {
                        virtqueue_notify(vq);
                        PMD_RX_LOG(DEBUG, "Notified");
                }
        }

        return nb_rx;
}