/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>

#include <tmmintrin.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#define RTE_VIRTIO_VPMD_RX_BURST 32
#define RTE_VIRTIO_DESC_PER_LOOP 8
#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
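
/*
 * Note: the rearm threshold equals the RX burst size, so each refill posts a
 * full burst of RTE_VIRTIO_VPMD_RX_BURST buffers at once.  The vectorized
 * path also relies on the ring size being a power of two, so wrapping can be
 * done with "idx & (vq_nentries - 1)" instead of a modulo.
 */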
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
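
/*
 * Post a single mbuf on the RX ring.  In the simple (vectorized) layout the
 * descriptor index always equals the avail-ring index, so only the descriptor
 * and the software ring entry need to be written.  The buffer address is
 * rewound by the virtio-net header size so the header lands in the mbuf
 * headroom.
 */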
int __attribute__((cold))
virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
	struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct vring_desc *start_dp;
	uint16_t desc_idx;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	dxp = &vq->vq_descx[desc_idx];
	dxp->cookie = (void *)cookie;
	vq->sw_ring[desc_idx] = cookie;

	start_dp = vq->vq_ring.desc;
	start_dp[desc_idx].addr =
		VIRTIO_MBUF_ADDR(cookie, vq) +
		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
	start_dp[desc_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;

	vq->vq_free_cnt--;
	vq->vq_avail_idx++;

	return 0;
}
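
/*
 * Bulk refill of the RX ring used by the vectorized receive path.  A whole
 * rearm burst of mbufs is taken from the mempool in one call; each mbuf's
 * rearm_data word (refcnt, nb_segs, port, data_off) is initialized from the
 * cached template before its descriptor is written.
 */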
static inline void
virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
{
	int i;
	uint16_t desc_idx;
	struct rte_mbuf **sw_ring;
	struct vring_desc *start_dp;
	int ret;
	struct virtqueue *vq = rxvq->vq;

	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
	sw_ring = &vq->sw_ring[desc_idx];
	start_dp = &vq->vq_ring.desc[desc_idx];

	ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
		RTE_VIRTIO_VPMD_RX_REARM_THRESH);
	if (unlikely(ret)) {
		rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		return;
	}

	for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
		uintptr_t p;

		p = (uintptr_t)&sw_ring[i]->rearm_data;
		*(uint64_t *)p = rxvq->mbuf_initializer;

		start_dp[i].addr =
			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}

	vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
	vq_update_avail_idx(vq);
}
/* Virtio vPMD receive routine; only accepts bursts of at least
 * RTE_VIRTIO_DESC_PER_LOOP packets.
 *
 * This routine is for non-mergeable RX only; one descriptor is used for each
 * guest buffer.  It relies on the RX ring layout optimization: each entry in
 * the avail ring points to the descriptor with the same index in the desc
 * ring, and this mapping is never changed by the driver.
 *
 * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packet is returned
 */
uint16_t
virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_used_elem *rused;
	struct rte_mbuf **sw_ring;
	struct rte_mbuf **sw_ring_end;
	uint16_t nb_pkts_received;
	__m128i shuf_msk1, shuf_msk2, len_adjust;
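
	/*
	 * Each vring_used_elem is 8 bytes {id, len}, so one 16-byte load
	 * covers two used entries.  The shuffle masks below move each entry's
	 * length into the pkt_len/data_len slots of the mbuf's
	 * rx_descriptor_fields1: shuf_msk1 picks bytes 4-5 (first entry's
	 * len), shuf_msk2 picks bytes 12-13 (second entry's len); lanes set
	 * to 0xFF are zeroed by PSHUFB.
	 */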
	shuf_msk1 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		5, 4,			/* dat len */
		0xFF, 0xFF, 5, 4,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);
	shuf_msk2 = _mm_set_epi8(
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF,		/* vlan tci */
		13, 12,			/* dat len */
		0xFF, 0xFF, 13, 12,	/* pkt len */
		0xFF, 0xFF, 0xFF, 0xFF	/* packet type */
	);
	/* Subtract the header length.
	 * In which case do we need the header length in used->len?
	 */
	len_adjust = _mm_set_epi16(
		0, 0,
		0,
		(uint16_t)-vq->hw->vtnet_hdr_size,
		0, (uint16_t)-vq->hw->vtnet_hdr_size,
		0, 0);
	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;

	nb_used = VIRTQUEUE_NUSED(vq);

	rte_compiler_barrier();

	if (unlikely(nb_used == 0))
		return 0;

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
	nb_used = RTE_MIN(nb_used, nb_pkts);

	desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
	rused = &vq->vq_ring.used->ring[desc_idx];
	sw_ring = &vq->sw_ring[desc_idx];
	sw_ring_end = &vq->sw_ring[vq->vq_nentries];

	_mm_prefetch((const void *)rused, _MM_HINT_T0);

	if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
		virtio_rxq_rearm_vec(rxvq);
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}
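
	/*
	 * Main loop: each iteration consumes RTE_VIRTIO_DESC_PER_LOOP (8)
	 * used entries.  mbuf pointers are copied from sw_ring to rx_pkts two
	 * at a time with 128-bit loads/stores, while the used->len fields are
	 * shuffled straight into rx_descriptor_fields1 and adjusted by the
	 * virtio-net header size.  The loop stops early when the software
	 * ring wraps, so indexes stay contiguous within one pass.
	 */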
	for (nb_pkts_received = 0;
		nb_pkts_received < nb_used;) {
		__m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
		__m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
		mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
		desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
		_mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);

		mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
		desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
		_mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);

		mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
		desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
		_mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);

		mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
		desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
		_mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
		pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
		pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
		pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
		pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
			pkt_mb[1]);
		_mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
			pkt_mb[0]);

		pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
		pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
		pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
		pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
			pkt_mb[3]);
		_mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
			pkt_mb[2]);

		pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
		pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
		pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
		pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
			pkt_mb[5]);
		_mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
			pkt_mb[4]);

		pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
		pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
		pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
		pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
		_mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
			pkt_mb[7]);
		_mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
			pkt_mb[6]);
		if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
			if (sw_ring + nb_used <= sw_ring_end)
				nb_pkts_received += nb_used;
			else
				nb_pkts_received += sw_ring_end - sw_ring;
			break;
		} else {
			if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
				sw_ring_end)) {
				nb_pkts_received += sw_ring_end - sw_ring;
				break;
			} else {
				nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;

				rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
				sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
				rused += RTE_VIRTIO_DESC_PER_LOOP;
				nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
			}
		}
	}
	vq->vq_used_cons_idx += nb_pkts_received;
	vq->vq_free_cnt += nb_pkts_received;
	rxvq->stats.packets += nb_pkts_received;

	return nb_pkts_received;
}
#define VIRTIO_TX_FREE_THRESH 32
#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
#define VIRTIO_TX_FREE_NR 32
/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
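
/*
 * Note: in the simple TX path each packet consumes two descriptors: a data
 * descriptor in the first half of the ring chained to a pre-built virtio-net
 * header descriptor in the second half (prepared when the queue is set up).
 * That is why indexes below are masked with (vq_nentries >> 1) - 1 and the
 * free counter moves in steps of two per packet.
 */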
static inline void
virtio_xmit_cleanup(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
	uint32_t nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];

	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
		   ((vq->vq_nentries >> 1) - 1));
	m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
	m = __rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL)) {
		/* Batch frees: mbufs from the same mempool get one bulk put */
		free[0] = m;
		nb_free = 1;
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (likely(m != NULL)) {
				if (likely(m->pool == free[0]->pool)) {
					free[nb_free++] = m;
				} else {
					rte_mempool_put_bulk(free[0]->pool,
						(void **)free,
						RTE_MIN(RTE_DIM(free),
							nb_free));
					free[0] = m;
					nb_free = 1;
				}
			}
		}
		rte_mempool_put_bulk(free[0]->pool, (void **)free,
			RTE_MIN(RTE_DIM(free), nb_free));
	} else {
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
			m = __rte_pktmbuf_prefree_seg(m);
			if (m != NULL)
				rte_mempool_put(m->pool, m);
		}
	}

	vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
	vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
}
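
/*
 * Simple TX burst: the descriptors were pre-chained to their header
 * descriptors at queue setup, so transmit only fills in the data address and
 * length of each packet.  The copy is split into two passes so a burst that
 * wraps past the end of the (half-size) index range continues from index 0.
 */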
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	uint16_t nb_used;
	uint16_t desc_idx;
	struct vring_desc *start_dp;
	uint16_t nb_tail, nb_commit;
	int i;
	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;

	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();

	if (nb_used >= VIRTIO_TX_FREE_THRESH)
		virtio_xmit_cleanup(vq);

	nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
	desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
	start_dp = vq->vq_ring.desc;
	nb_tail = (uint16_t)(desc_idx_max + 1 - desc_idx);
	if (nb_commit >= nb_tail) {
		for (i = 0; i < nb_tail; i++)
			vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
		}
		nb_commit -= nb_tail;
		desc_idx = 0;
	}
	for (i = 0; i < nb_commit; i++)
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
	}
	rte_compiler_barrier();

	vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
	vq->vq_avail_idx += nb_pkts;
	vq->vq_ring.avail->idx = vq->vq_avail_idx;
	txvq->stats.packets += nb_pkts;

	if (likely(nb_pkts)) {
		if (unlikely(virtqueue_kick_prepare(vq)))
			virtqueue_notify(vq);
	}

	return nb_pkts;
}
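
/*
 * Build the 64-bit mbuf_initializer used by the RX rearm path: a template
 * mbuf is filled in and its rearm_data word (refcnt, nb_segs, port, data_off)
 * is captured so every refilled mbuf can be initialized with a single store.
 */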
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;

	return 0;
}