/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_vect.h>

#include "virtio_rxtx_simple.h"
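
/* Number of used-ring entries consumed per inner-loop iteration below;
 * it is also the minimum burst size this receive path accepts.
 */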
#define RTE_VIRTIO_DESC_PER_LOOP 8

/* virtio vPMD receive routine; only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, one descriptor per guest buffer.
 * It relies on the RX ring layout optimization: each entry in the avail
 * ring points to the descriptor with the same index in the desc ring, and
 * the driver never changes that mapping.
 *
 * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue,
                struct rte_mbuf **__rte_restrict rx_pkts,
                uint16_t nb_pkts)
{
        struct virtnet_rx *rxvq = rx_queue;
        struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
        struct virtio_hw *hw = vq->hw;
        uint16_t nb_used, nb_total;
        uint16_t desc_idx;
        struct vring_used_elem *rused;
        struct rte_mbuf **sw_ring;
        struct rte_mbuf **sw_ring_end;
        struct rte_mbuf **ref_rx_pkts;
        uint16_t nb_pkts_received = 0;

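        /* Each vring_used_elem is 8 bytes (32-bit id, 32-bit len), so one
         * 16-byte NEON register holds two used entries. shuf_msk1 copies
         * bytes 4-5 (low 16 bits of the first entry's len) into the pkt_len
         * and data_len slots of rx_descriptor_fields1; shuf_msk2 does the
         * same with bytes 12-13 for the second entry. 0xFF lanes are zeroed
         * by vqtbl1q_u8.
         */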
        uint8x16_t shuf_msk1 = {
                0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
                4, 5, 0xFF, 0xFF,       /* pkt len */
                4, 5,                   /* dat len */
                0xFF, 0xFF,             /* vlan tci */
                0xFF, 0xFF, 0xFF, 0xFF
        };

        uint8x16_t shuf_msk2 = {
                0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
                12, 13, 0xFF, 0xFF,     /* pkt len */
                12, 13,                 /* dat len */
                0xFF, 0xFF,             /* vlan tci */
                0xFF, 0xFF, 0xFF, 0xFF
        };

        /* Subtract the header length.
         * In which case do we need the header length in used->len?
         */
        uint16x8_t len_adjust = {
                0, 0,
                (uint16_t)hw->vtnet_hdr_size, 0,
                (uint16_t)hw->vtnet_hdr_size,
                0,
                0, 0
        };
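        /* The non-zero lanes of len_adjust line up with the 16-bit pkt_len
         * and data_len words produced by the shuffles above, so a single
         * vsubq_u16 per packet strips the virtio-net header length from
         * both fields at once.
         */
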
        if (unlikely(hw->started == 0))
                return nb_pkts_received;

        if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
                return 0;

        if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                virtio_rxq_rearm_vec(rxvq);
                if (unlikely(virtqueue_kick_prepare(vq)))
                        virtqueue_notify(vq);
        }

        /* virtqueue_nused has a load-acquire or rte_io_rmb inside */
        nb_used = virtqueue_nused(vq);

        if (unlikely(nb_used == 0))
                return 0;

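        /* The loop below consumes exactly RTE_VIRTIO_DESC_PER_LOOP used
         * entries per iteration, so round the request down to a multiple
         * of it and clamp to what is actually available.
         */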
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
        nb_used = RTE_MIN(nb_used, nb_pkts);

        desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
        rused = &vq->vq_split.ring.used->ring[desc_idx];
        sw_ring = &vq->sw_ring[desc_idx];
        sw_ring_end = &vq->sw_ring[vq->vq_nentries];

        rte_prefetch_non_temporal(rused);

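        /* Snapshot the batch budget and the start of the output array;
         * ref_rx_pkts is walked again at the end for per-packet stats.
         */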
        nb_total = nb_used;
        ref_rx_pkts = rx_pkts;
        for (nb_pkts_received = 0;
                nb_pkts_received < nb_total;) {
                uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
                uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
                uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

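                /* Load eight mbuf pointers from sw_ring and eight used
                 * elements in 16-byte chunks; the mbuf pointers go straight
                 * out to rx_pkts, two 64-bit pointers per vst1q_u64.
                 */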
                mbp[0] = vld1q_u64((uint64_t *)(sw_ring + 0));
                desc[0] = vld1q_u64((uint64_t *)(rused + 0));
                vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0]);

                mbp[1] = vld1q_u64((uint64_t *)(sw_ring + 2));
                desc[1] = vld1q_u64((uint64_t *)(rused + 2));
                vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1]);

                mbp[2] = vld1q_u64((uint64_t *)(sw_ring + 4));
                desc[2] = vld1q_u64((uint64_t *)(rused + 4));
                vst1q_u64((uint64_t *)&rx_pkts[4], mbp[2]);

                mbp[3] = vld1q_u64((uint64_t *)(sw_ring + 6));
                desc[3] = vld1q_u64((uint64_t *)(rused + 6));
                vst1q_u64((uint64_t *)&rx_pkts[6], mbp[3]);

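                /* For each pair of used elements: shuffle the length into
                 * the rx_descriptor_fields1 layout, subtract the virtio-net
                 * header size, then store 16 bytes into each mbuf.
                 */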
                pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[0]), shuf_msk2));
                pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[0]), shuf_msk1));
                pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
                pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
                vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1,
                                pkt_mb[1]);
                vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1,
                                pkt_mb[0]);

                pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[1]), shuf_msk2));
                pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[1]), shuf_msk1));
                pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
                pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
                vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1,
                                pkt_mb[3]);
                vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1,
                                pkt_mb[2]);

                pkt_mb[5] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[2]), shuf_msk2));
                pkt_mb[4] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[2]), shuf_msk1));
                pkt_mb[5] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[5]), len_adjust));
                pkt_mb[4] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[4]), len_adjust));
                vst1q_u64((void *)&rx_pkts[5]->rx_descriptor_fields1,
                                pkt_mb[5]);
                vst1q_u64((void *)&rx_pkts[4]->rx_descriptor_fields1,
                                pkt_mb[4]);

                pkt_mb[7] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[3]), shuf_msk2));
                pkt_mb[6] = vreinterpretq_u64_u8(vqtbl1q_u8(
                                vreinterpretq_u8_u64(desc[3]), shuf_msk1));
                pkt_mb[7] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[7]), len_adjust));
                pkt_mb[6] = vreinterpretq_u64_u16(vsubq_u16(
                                vreinterpretq_u16_u64(pkt_mb[6]), len_adjust));
                vst1q_u64((void *)&rx_pkts[7]->rx_descriptor_fields1,
                                pkt_mb[7]);
                vst1q_u64((void *)&rx_pkts[6]->rx_descriptor_fields1,
                                pkt_mb[6]);

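                /* Account for ring wraparound: sw_ring and rused are raw
                 * pointers into the rings, so a batch must not run past
                 * sw_ring_end. On the final or wrapping batch only the
                 * in-range packets are counted and the loop exits; the
                 * wrapped remainder is picked up by the next call.
                 */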
                if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
                        if (sw_ring + nb_used <= sw_ring_end)
                                nb_pkts_received += nb_used;
                        else
                                nb_pkts_received += sw_ring_end - sw_ring;
                        break;
                } else {
                        if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
                                sw_ring_end)) {
                                nb_pkts_received += sw_ring_end - sw_ring;
                                break;
                        } else {
                                nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

                                rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
                                sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
                                rused += RTE_VIRTIO_DESC_PER_LOOP;
                                nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
                        }
                }
        }

        vq->vq_used_cons_idx += nb_pkts_received;
        vq->vq_free_cnt += nb_pkts_received;
        rxvq->stats.packets += nb_pkts_received;
        for (nb_used = 0; nb_used < nb_pkts_received; nb_used++)
                virtio_update_packet_stats(&rxvq->stats, ref_rx_pkts[nb_used]);

        return nb_pkts_received;
}