/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 * Copyright(c) 2019 IBM Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_altivec.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "virtio_rxtx_simple.h"
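
/* Eight descriptors are consumed per inner-loop iteration: four 16-byte
 * VSX loads, each covering two 8-byte used-ring entries. */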
#define RTE_VIRTIO_DESC_PER_LOOP 8

/* Virtio vPMD receive routine; it only accepts nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX: one descriptor per guest buffer.
 * It relies on the RX ring layout optimization: each entry in the avail
 * ring points to the descriptor with the same index in the desc ring, and
 * the driver never changes that mapping.
 *
 * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received = 0;
	const vector unsigned char zero = {0};
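
	/* vec_perm control masks: copy each used element's 32-bit len into
	 * the mbuf's pkt_len and data_len lanes; 0xFF indices select from
	 * 'zero' and clear the remaining lanes. shuf_msk1 extracts the first
	 * used element of a 16-byte load, shuf_msk2 the second. */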
	const vector unsigned char shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		4, 5, 0xFF, 0xFF,	/* pkt len */
		4, 5,			/* dat len */
		0xFF, 0xFF,		/* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	const vector unsigned char shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		12, 13, 0xFF, 0xFF,	/* pkt len */
		12, 13,			/* dat len */
		0xFF, 0xFF,		/* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};

	/*
	 * Subtract the virtio-net header length from the pkt_len and
	 * data_len lanes: used->len includes the header, the mbuf
	 * fields must not.
	 */
	const vector unsigned short len_adjust = {
		0, 0,
		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
		(uint16_t)-vq->hw->vtnet_hdr_size, 0,
		0, 0
	};

	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;
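
	/* Round the burst down to a multiple of the loop stride and cap it
	 * at the number of used descriptors actually available. */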
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);
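
	/* vq_nentries is a power of two, so masking the consumer index
	 * yields the position within the used ring. */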
	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_split.ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch0(rused);
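
	/* Refill the ring with fresh mbufs once enough descriptors have
	 * been consumed, and notify the host only if it expects a kick. */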
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}
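
	/* Main loop: each pass gathers eight mbuf pointers and eight used
	 * elements, writes the pointers into rx_pkts, and shuffles the
	 * lengths into each mbuf's rx_descriptor_fields1. */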
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		vector unsigned char desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		vector unsigned char mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		vector unsigned char pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
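
		/* Each 16-byte load picks up two mbuf pointers (mbp) or two
		 * used-ring entries (desc); the pointer pairs are stored
		 * straight into the rx_pkts array. */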
		mbp[0] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 0));
		desc[0] = vec_vsx_ld(0, (unsigned char const *)(rused + 0));
		*(vector unsigned char *)&rx_pkts[0] = mbp[0];

		mbp[1] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 2));
		desc[1] = vec_vsx_ld(0, (unsigned char const *)(rused + 2));
		*(vector unsigned char *)&rx_pkts[2] = mbp[1];

		mbp[2] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 4));
		desc[2] = vec_vsx_ld(0, (unsigned char const *)(rused + 4));
		*(vector unsigned char *)&rx_pkts[4] = mbp[2];

		mbp[3] = vec_vsx_ld(0, (unsigned char const *)(sw_ring + 6));
		desc[3] = vec_vsx_ld(0, (unsigned char const *)(rused + 6));
		*(vector unsigned char *)&rx_pkts[6] = mbp[3];
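
		/* Shuffle each pair of used elements into mbuf metadata and
		 * subtract the virtio-net header length from the lengths. */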
		pkt_mb[0] = vec_perm(desc[0], zero, shuf_msk1);
		pkt_mb[1] = vec_perm(desc[0], zero, shuf_msk2);
		pkt_mb[0] = (vector unsigned char)
			((vector unsigned short)pkt_mb[0] + len_adjust);
		pkt_mb[1] = (vector unsigned char)
			((vector unsigned short)pkt_mb[1] + len_adjust);
		*(vector unsigned char *)&rx_pkts[0]->rx_descriptor_fields1 =
			pkt_mb[0];
		*(vector unsigned char *)&rx_pkts[1]->rx_descriptor_fields1 =
			pkt_mb[1];

		pkt_mb[2] = vec_perm(desc[1], zero, shuf_msk1);
		pkt_mb[3] = vec_perm(desc[1], zero, shuf_msk2);
		pkt_mb[2] = (vector unsigned char)
			((vector unsigned short)pkt_mb[2] + len_adjust);
		pkt_mb[3] = (vector unsigned char)
			((vector unsigned short)pkt_mb[3] + len_adjust);
		*(vector unsigned char *)&rx_pkts[2]->rx_descriptor_fields1 =
			pkt_mb[2];
		*(vector unsigned char *)&rx_pkts[3]->rx_descriptor_fields1 =
			pkt_mb[3];

		pkt_mb[4] = vec_perm(desc[2], zero, shuf_msk1);
		pkt_mb[5] = vec_perm(desc[2], zero, shuf_msk2);
		pkt_mb[4] = (vector unsigned char)
			((vector unsigned short)pkt_mb[4] + len_adjust);
		pkt_mb[5] = (vector unsigned char)
			((vector unsigned short)pkt_mb[5] + len_adjust);
		*(vector unsigned char *)&rx_pkts[4]->rx_descriptor_fields1 =
			pkt_mb[4];
		*(vector unsigned char *)&rx_pkts[5]->rx_descriptor_fields1 =
			pkt_mb[5];

		pkt_mb[6] = vec_perm(desc[3], zero, shuf_msk1);
		pkt_mb[7] = vec_perm(desc[3], zero, shuf_msk2);
		pkt_mb[6] = (vector unsigned char)
			((vector unsigned short)pkt_mb[6] + len_adjust);
		pkt_mb[7] = (vector unsigned char)
			((vector unsigned short)pkt_mb[7] + len_adjust);
		*(vector unsigned char *)&rx_pkts[6]->rx_descriptor_fields1 =
			pkt_mb[6];
		*(vector unsigned char *)&rx_pkts[7]->rx_descriptor_fields1 =
			pkt_mb[7];
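
		/* Tail handling: stop at the end of the burst, or stop at the
		 * sw_ring wrap point and let the next call resume from the
		 * start of the ring. */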
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}
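
	/* Publish the consumed descriptors and update the queue counters. */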
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}