drivers/net/virtio/virtio_rxtx_simple_neon.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_vect.h>

#include "virtio_rxtx_simple.h"

#define RTE_VIRTIO_DESC_PER_LOOP 8
/* Virtio vPMD receive routine; only accepts bursts where
 * nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, one descriptor per guest buffer.
 * It relies on the RX ring layout optimization: each entry in the avail
 * ring points to the descriptor with the same index in the desc ring,
 * and this mapping is never changed by the driver.
 *
 * - If nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned.
 */
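/* The fixed avail->desc mapping this routine depends on is established
 * once at queue setup; conceptually, a simplified sketch of what the
 * setup code does:
 *
 *	for (i = 0; i < vq->vq_nentries; i++)
 *		vq->vq_split.ring.avail->ring[i] = i;
 *
 * As a result, used->ring[k].id always matches the descriptor index, so
 * the mbuf for each used element can be looked up directly in sw_ring[]
 * at the same ring position.
 */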
uint16_t
virtio_recv_pkts_vec(void *rx_queue,
		struct rte_mbuf **__rte_restrict rx_pkts,
		uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used, nb_total;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	struct rte_mbuf **ref_rx_pkts;
	uint16_t nb_pkts_received = 0;

	uint8x16_t shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		4, 5, 0xFF, 0xFF,       /* pkt len */
		4, 5,                   /* dat len */
		0xFF, 0xFF,             /* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	uint8x16_t shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		12, 13, 0xFF, 0xFF,     /* pkt len */
		12, 13,                 /* dat len */
		0xFF, 0xFF,             /* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};
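
	/* Each 128-bit used-ring load below covers two 8-byte
	 * vring_used_elem entries ({id, len} pairs): shuf_msk1 picks the
	 * len bytes (4-5) of the first entry, shuf_msk2 the len bytes
	 * (12-13) of the second. 0xFF lanes produce zero bytes, since
	 * vqtbl1q_u8 returns 0 for out-of-range indices.
	 */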

	/* Subtract the header length: used->len as reported by the device
	 * includes the virtio-net header, which is not packet data and so
	 * must not be counted in pkt_len/data_len.
	 */
	uint16x8_t len_adjust = {
		0, 0,
		(uint16_t)hw->vtnet_hdr_size, 0,
		(uint16_t)hw->vtnet_hdr_size,
		0,
		0, 0
	};
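
	/* Per packet, the shuffle plus 16-bit subtract below are roughly
	 * equivalent to the scalar assignments (illustrative sketch,
	 * ignoring the cleared ptype/vlan lanes):
	 *
	 *	rx_pkts[i]->pkt_len  = rused[i].len - hw->vtnet_hdr_size;
	 *	rx_pkts[i]->data_len = rused[i].len - hw->vtnet_hdr_size;
	 */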

	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	/* virtqueue_nused has a load-acquire or rte_cio_rmb inside */
	nb_used = virtqueue_nused(vq);

	if (unlikely(nb_used == 0))
		return 0;

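	/* Round the burst down to a multiple of the unrolled loop and
	 * clamp it to the number of descriptors the device has used.
	 */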
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_split.ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch_non_temporal(rused);

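	/* Refill the RX ring with fresh mbufs once enough slots are free,
	 * and kick the device if it expects a notification.
	 */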
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	nb_total = nb_used;
	ref_rx_pkts = rx_pkts;
	for (nb_pkts_received = 0; nb_pkts_received < nb_total;) {
		uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

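		/* Load eight mbuf pointers from sw_ring and the eight
		 * matching used elements (two 64-bit entries per 128-bit
		 * load), and store the mbuf pointers straight into the
		 * caller's rx_pkts array.
		 */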
		mbp[0] = vld1q_u64((uint64_t *)(sw_ring + 0));
		desc[0] = vld1q_u64((uint64_t *)(rused + 0));
		vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0]);

		mbp[1] = vld1q_u64((uint64_t *)(sw_ring + 2));
		desc[1] = vld1q_u64((uint64_t *)(rused + 2));
		vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1]);

		mbp[2] = vld1q_u64((uint64_t *)(sw_ring + 4));
		desc[2] = vld1q_u64((uint64_t *)(rused + 4));
		vst1q_u64((uint64_t *)&rx_pkts[4], mbp[2]);

		mbp[3] = vld1q_u64((uint64_t *)(sw_ring + 6));
		desc[3] = vld1q_u64((uint64_t *)(rused + 6));
		vst1q_u64((uint64_t *)&rx_pkts[6], mbp[3]);

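		/* For each pair of used elements, scatter the len field
		 * into the mbuf's rx_descriptor_fields1 (pkt_len and
		 * data_len), subtract the virtio-net header length, and
		 * store the result.
		 */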
		pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[0]), shuf_msk2));
		pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[0]), shuf_msk1));
		pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
		pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
		vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[1]), shuf_msk2));
		pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[1]), shuf_msk1));
		pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
		pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
		vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[2]), shuf_msk2));
		pkt_mb[4] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[2]), shuf_msk1));
		pkt_mb[5] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[5]), len_adjust));
		pkt_mb[4] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[4]), len_adjust));
		vst1q_u64((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		vst1q_u64((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[3]), shuf_msk2));
		pkt_mb[6] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[3]), shuf_msk1));
		pkt_mb[7] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[7]), len_adjust));
		pkt_mb[6] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[6]), len_adjust));
		vst1q_u64((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		vst1q_u64((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);

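		/* Handle the tail of the batch and the ring wrap: count
		 * only entries up to sw_ring_end; a wrapped batch resumes
		 * from index 0 on the next call.
		 */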
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused   += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}

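	/* Publish progress: advance the used consumer index, return the
	 * consumed slots to the free count, and update receive stats.
	 */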
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	for (nb_used = 0; nb_used < nb_pkts_received; nb_used++)
		virtio_update_packet_stats(&rxvq->stats, ref_rx_pkts[nb_used]);

	return nb_pkts_received;
}