/*
 *   Copyright (C) Cavium, Inc. 2016
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_vect.h>

#include "virtio_rxtx_simple.h"
#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
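/* The rearm threshold equals the burst size, so the descriptor ring is
 * refilled in full-burst chunks: only once at least a whole burst of
 * slots has been consumed.
 */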
/* Virtio vPMD receive routine; only accepts bursts where
 * nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP.
 *
 * This routine is for non-mergeable RX: one descriptor per guest buffer.
 * It relies on the RX ring layout optimization: each entry in the avail
 * ring points to the descriptor with the same index in the desc ring,
 * and the driver never changes this mapping.
 *
 * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
 */
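/* Illustrative only: applications do not call this function directly.
 * It is installed as the device's rx_pkt_burst callback, so it is
 * reached through the generic burst API, roughly:
 *
 *	struct rte_mbuf *pkts[RTE_VIRTIO_VPMD_RX_BURST];
 *	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
 *			RTE_VIRTIO_VPMD_RX_BURST);
 *
 * where port_id and queue_id (hypothetical names here) identify the
 * virtio port and its RX queue.
 */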
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received = 0;
	uint8x16_t shuf_msk1 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		4, 5, 0xFF, 0xFF, /* pkt len */
		4, 5, /* dat len */
		0xFF, 0xFF, /* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};
	uint8x16_t shuf_msk2 = {
		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
		12, 13, 0xFF, 0xFF, /* pkt len */
		12, 13, /* dat len */
		0xFF, 0xFF, /* vlan tci */
		0xFF, 0xFF, 0xFF, 0xFF
	};
	/* Subtract the virtio-net header length: used->len counts the whole
	 * descriptor write, header included, while the mbuf pkt_len and
	 * data_len fields must describe the packet payload only.
	 */
	uint16x8_t len_adjust = {
		0, 0, /* packet type */
		(uint16_t)vq->hw->vtnet_hdr_size, 0, /* pkt len */
		(uint16_t)vq->hw->vtnet_hdr_size, /* dat len */
		0, /* vlan tci */
		0, 0
	};
	if (unlikely(hw->started == 0))
		return nb_pkts_received;

	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;
	nb_used = VIRTQUEUE_NUSED(vq);

	/* Order the reads of the used ring entries after the read of
	 * used->idx above.
	 */
	rte_rmb();

	if (unlikely(nb_used == 0))
		return 0;
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	rte_prefetch_non_temporal(rused);
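	/* Refill the descriptor ring once a full rearm threshold of slots
	 * has been consumed, and kick the host only if it has not
	 * suppressed notifications.
	 */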
	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
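		/* Gather eight descriptors per iteration. Each
		 * vring_used_elem is 8 bytes (32-bit id plus 32-bit len),
		 * so one 128-bit load covers two used entries; the matching
		 * load from sw_ring picks up the two corresponding mbuf
		 * pointers, which are stored straight into rx_pkts.
		 */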
		mbp[0] = vld1q_u64((uint64_t *)(sw_ring + 0));
		desc[0] = vld1q_u64((uint64_t *)(rused + 0));
		vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0]);

		mbp[1] = vld1q_u64((uint64_t *)(sw_ring + 2));
		desc[1] = vld1q_u64((uint64_t *)(rused + 2));
		vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1]);

		mbp[2] = vld1q_u64((uint64_t *)(sw_ring + 4));
		desc[2] = vld1q_u64((uint64_t *)(rused + 4));
		vst1q_u64((uint64_t *)&rx_pkts[4], mbp[2]);

		mbp[3] = vld1q_u64((uint64_t *)(sw_ring + 6));
		desc[3] = vld1q_u64((uint64_t *)(rused + 6));
		vst1q_u64((uint64_t *)&rx_pkts[6], mbp[3]);
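		/* For each pair of used entries, vqtbl1q_u8() shuffles the
		 * raw bytes into the mbuf rx_descriptor_fields1 layout
		 * (packet_type, pkt_len, data_len, vlan_tci): shuf_msk1
		 * extracts the first element of the pair, shuf_msk2 the
		 * second. The subtract then strips the virtio-net header
		 * size from pkt_len and data_len before the 16-byte result
		 * is stored into the mbuf.
		 */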
		pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[0]), shuf_msk2));
		pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[0]), shuf_msk1));
		pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
		pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
		vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);
		pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[1]), shuf_msk2));
		pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[1]), shuf_msk1));
		pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
		pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
		vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);
		pkt_mb[5] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[2]), shuf_msk2));
		pkt_mb[4] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[2]), shuf_msk1));
		pkt_mb[5] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[5]), len_adjust));
		pkt_mb[4] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[4]), len_adjust));
		vst1q_u64((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		vst1q_u64((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);
		pkt_mb[7] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[3]), shuf_msk2));
		pkt_mb[6] = vreinterpretq_u64_u8(vqtbl1q_u8(
				vreinterpretq_u8_u64(desc[3]), shuf_msk1));
		pkt_mb[7] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[7]), len_adjust));
		pkt_mb[6] = vreinterpretq_u64_u16(vsubq_u16(
				vreinterpretq_u16_u64(pkt_mb[6]), len_adjust));
		vst1q_u64((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		vst1q_u64((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);
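		/* Advance, clamping at the end of sw_ring: the used ring
		 * wraps at vq_nentries, so a chunk that would run past the
		 * end is cut short and the remaining used entries are left
		 * for the next call.
		 */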
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}
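	/* Publish the consumed descriptors back into the queue state; the
	 * freed slots become available to the next rearm pass.
	 */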
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;
	return nb_pkts_received;
}