/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <stdint.h>
#include <tmmintrin.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "virtio_rxtx_simple.h"
#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
/* virtio vPMD receive routine; only accepts bursts of
 * nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX, one desc for each guest buffer.
 * This routine is based on the RX ring layout optimization. Each entry in the
 * avail ring points to the desc with the same index in the desc ring and this
 * will never be changed in the driver.
 *
 * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP: just return no packet
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received = 0;
	__m128i shuf_msk1, shuf_msk2, len_adjust;
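
	/* Shuffle masks that move the 16-bit length of each vring_used_elem
	 * into the pkt_len/data_len slots of the mbuf rx_descriptor_fields1
	 * layout; shuf_msk1 handles the first used element of a 16-byte pair,
	 * shuf_msk2 the second.
	 */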
	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		5, 4,			/* dat len */
		0xFF, 0xFF, 5, 4,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);
	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		13, 12,			/* dat len */
		0xFF, 0xFF, 13, 12,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);
	/* Subtract the vnet header length: used->len reported by the device
	 * includes the header, but the mbuf pkt_len/data_len should not.
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);

	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;
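
	/* Descriptors are processed in groups of RTE_VIRTIO_DESC_PER_LOOP:
	 * round the burst size down to a multiple of the loop width and cap
	 * it by the number of used entries actually available.
	 */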
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch0(rused);
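
	/* Refill the avail ring with fresh mbufs once enough descriptors have
	 * been consumed, and notify the device if it expects a kick.
	 */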
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}
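
	/* Main loop: each iteration converts up to RTE_VIRTIO_DESC_PER_LOOP
	 * used-ring entries into ready-to-deliver mbufs.
	 */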
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];

		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
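
		/* Shuffle each used element's length into the mbuf header
		 * layout, subtract the vnet header from pkt_len/data_len,
		 * then store the result into rx_descriptor_fields1.
		 */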
		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);
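
		/* Account for the packets just processed, stopping early when
		 * the burst is done or when sw_ring would run past the end of
		 * the ring.
		 */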
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}
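
	/* Advance the used-ring consumer index, mark the consumed descriptors
	 * as free so they can be rearmed, and update RX statistics.
	 */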
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}
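
/*
 * Usage note (illustrative, not part of the driver): this burst handler is
 * normally reached through rte_eth_rx_burst() on a queue configured for the
 * simple/vectorized RX path, e.g.:
 *
 *	struct rte_mbuf *pkts[RTE_VIRTIO_VPMD_RX_BURST];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts,
 *			RTE_VIRTIO_VPMD_RX_BURST);
 *
 * Bursts smaller than RTE_VIRTIO_DESC_PER_LOOP (8) return no packets, so
 * callers should request at least that many mbufs per call.
 */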