/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 * Copyright(c) 2019 IBM Corporation
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "virtio_rxtx_simple.h"

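/* Number of descriptors consumed per iteration of the vector RX loop. */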
#define RTE_VIRTIO_DESC_PER_LOOP 8
/* Virtio vPMD receive routine. Only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for the non-mergeable RX path, one descriptor per guest
 * buffer. It relies on the RX ring layout optimization: each entry in the
 * avail ring points to the descriptor with the same index in the desc ring,
 * and the driver never changes that mapping.
 *
 * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP: just return no packet
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used, nb_total;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	struct rte_mbuf **ref_rx_pkts;
	uint16_t nb_pkts_received = 0;

	const vector unsigned char zero = {0};

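	/*
	 * shuf_msk1/shuf_msk2 are vec_perm() control vectors: each picks the
	 * length bytes of one used-ring element out of a 16-byte load and
	 * places them at the pkt_len/data_len offsets of the mbuf's
	 * rx_descriptor_fields1. A 0xFF index selects a byte from the
	 * all-zero vector, clearing that field.
	 */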
	const vector unsigned char shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		4, 5, 0xFF, 0xFF,	/* pkt len */
		4, 5,			/* dat len */
		0xFF, 0xFF,		/* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	const vector unsigned char shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		12, 13, 0xFF, 0xFF,	/* pkt len */
		12, 13,			/* dat len */
		0xFF, 0xFF,		/* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	/*
	 * Subtract the header length. In which case do we need the header
	 * length in used->len?
	 */
	const vector unsigned short len_adjust = {
		0, 0,
		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
		0, 0
	};

	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

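	/*
	 * Process only a multiple of RTE_VIRTIO_DESC_PER_LOOP packets, and
	 * never more entries than the device has marked as used.
	 */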
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_split.ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch0(rused);

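	/*
	 * Refill the sw ring with fresh mbufs once enough descriptors have
	 * been consumed, and kick the device if it expects a notification.
	 */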
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	nb_total = nb_used;
	ref_rx_pkts = rx_pkts;
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_total;) {
		vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

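		/*
		 * Each 16-byte VSX load fetches two 8-byte elements: two
		 * sw_ring mbuf pointers and two used-ring entries (id + len)
		 * per load, eight packets per iteration. The mbuf pointers
		 * are stored straight into the rx_pkts array.
		 */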
		mbp[0] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 0));
		desc[0] = vec_vsx_ld(0, (unsigned char const *)(rused + 0));
		*(vector unsigned char *)&rx_pkts[0] = mbp[0];

		mbp[1] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 2));
		desc[1] = vec_vsx_ld(0, (unsigned char const *)(rused + 2));
		*(vector unsigned char *)&rx_pkts[2] = mbp[1];

		mbp[2] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 4));
		desc[2] = vec_vsx_ld(0, (unsigned char const *)(rused + 4));
		*(vector unsigned char *)&rx_pkts[4] = mbp[2];

		mbp[3] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 6));
		desc[3] = vec_vsx_ld(0, (unsigned char const *)(rused + 6));
		*(vector unsigned char *)&rx_pkts[6] = mbp[3];

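		/*
		 * Shuffle each used-ring element into the layout of the
		 * mbuf's rx_descriptor_fields1 and subtract the virtio-net
		 * header size from the pkt_len/data_len fields.
		 */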
		pkt_mb[0] = vec_perm(desc[0], zero, shuf_msk1);
		pkt_mb[1] = vec_perm(desc[0], zero, shuf_msk2);
		pkt_mb[0] = (vector unsigned char)
			((vector unsigned short)pkt_mb[0] + len_adjust);
		pkt_mb[1] = (vector unsigned char)
			((vector unsigned short)pkt_mb[1] + len_adjust);
		*(vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
			pkt_mb[0];
		*(vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
			pkt_mb[1];

		pkt_mb[2] = vec_perm(desc[1], zero, shuf_msk1);
		pkt_mb[3] = vec_perm(desc[1], zero, shuf_msk2);
		pkt_mb[2] = (vector unsigned char)
			((vector unsigned short)pkt_mb[2] + len_adjust);
		pkt_mb[3] = (vector unsigned char)
			((vector unsigned short)pkt_mb[3] + len_adjust);
		*(vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
			pkt_mb[2];
		*(vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
			pkt_mb[3];

		pkt_mb[4] = vec_perm(desc[2], zero, shuf_msk1);
		pkt_mb[5] = vec_perm(desc[2], zero, shuf_msk2);
		pkt_mb[4] = (vector unsigned char)
			((vector unsigned short)pkt_mb[4] + len_adjust);
		pkt_mb[5] = (vector unsigned char)
			((vector unsigned short)pkt_mb[5] + len_adjust);
		*(vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
			pkt_mb[4];
		*(vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
			pkt_mb[5];

		pkt_mb[6] = vec_perm(desc[3], zero, shuf_msk1);
		pkt_mb[7] = vec_perm(desc[3], zero, shuf_msk2);
		pkt_mb[6] = (vector unsigned char)
			((vector unsigned short)pkt_mb[6] + len_adjust);
		pkt_mb[7] = (vector unsigned char)
			((vector unsigned short)pkt_mb[7] + len_adjust);
		*(vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
			pkt_mb[6];
		*(vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
			pkt_mb[7];

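		/*
		 * Stop when fewer than a full batch remains, or when the next
		 * batch would run past the end of the sw ring; the remainder
		 * is picked up on the next call. Otherwise advance all the
		 * cursors by one batch.
		 */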
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}

	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	for (nb_used = 0; nb_used < nb_pkts_received; nb_used++)
		virtio_update_packet_stats(&rxvq->stats, ref_rx_pkts[nb_used]);

	return nb_pkts_received;
}