/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#include "virtio_logs.h"
#include "virtio_ethdev.h"
#include "virtio.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
#include "virtio_ring.h"

#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);

	return virtqueue_nused(vq) >= offset;
}

void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
	vq->vq_free_cnt += num;
	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
}

void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_split.ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_split.ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}

void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct rte_ether_addr *ea;

	stats->bytes += s;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count leading zeros to offset into the correct bin */
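		/*
		 * Worked example: for s = 128, __builtin_clz() on a
		 * 32-bit value returns 24, so bin = 32 - 24 - 5 = 3.
		 * Bins 2..5 thus cover 65..127, 128..255, 256..511 and
		 * 512..1023 bytes respectively.
		 */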
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
	if (rte_is_multicast_ether_addr(ea)) {
		if (rte_is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}

static inline void
virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
{
	VIRTIO_DUMP_PACKET(m, m->data_len);

	virtio_update_packet_stats(&rxvq->stats, m);
}

static uint16_t
virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
				  struct rte_mbuf **rx_pkts,
				  uint32_t *len,
				  uint16_t num)
{
	struct rte_mbuf *cookie;
	uint16_t i;
	uint16_t used_idx;
	struct vring_packed_desc *desc;
	uint16_t id;

	desc = vq->vq_packed.ring.desc;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx;
		/* desc_is_used has a load-acquire or rte_io_rmb inside
		 * and waits for the used desc in the virtqueue.
		 */
		if (!desc_is_used(&desc[used_idx], vq))
			return i;
		len[i] = desc[used_idx].len;
		id = desc[used_idx].id;
		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}
		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;

		vq->vq_free_cnt++;
		vq->vq_used_cons_idx++;
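		/*
		 * A packed ring has no free-running used index. When the
		 * consumer index wraps past the ring size, flip the local
		 * used_wrap_counter so that desc_is_used() keeps decoding
		 * the descriptor AVAIL/USED flag bits correctly on the
		 * next lap around the ring.
		 */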
		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx -= vq->vq_nentries;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
	}

	return i;
}

static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num ; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_split.ring.used->ring[used_idx];
		desc_idx = (uint16_t) uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}

static uint16_t
virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
			struct rte_mbuf **rx_pkts,
			uint32_t *len,
			uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx = 0;
	uint16_t i;

	if (unlikely(num == 0))
		return 0;

	for (i = 0; i < num; i++) {
		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
		/* Desc idx same as used idx */
		uep = &vq->vq_split.ring.used->ring[used_idx];
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq->vq_descx[used_idx].cookie = NULL;
	}

	vq_ring_free_inorder(vq, used_idx, i);
	return i;
}

static inline int
virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t head_idx, idx, i = 0;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = head_idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;
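
		/*
		 * Point the descriptor one virtio-net header before the
		 * packet data: the device writes the header into the tail
		 * of the mbuf headroom and the frame itself lands at the
		 * usual data offset. The headroom must therefore be at
		 * least vtnet_hdr_size bytes, which RTE_PKTMBUF_HEADROOM
		 * comfortably provides.
		 */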
		start_dp[idx].addr = cookies[i]->buf_iova +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookies[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;

		vq_update_avail_ring(vq, idx);
		head_idx++;
		i++;
	}

	vq->vq_desc_head_idx += num;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
				uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp = vq->vq_split.ring.desc;
	uint16_t idx, i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
		return -EFAULT;

	for (i = 0; i < num; i++) {
		idx = vq->vq_desc_head_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = cookie[i]->buf_iova +
			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_WRITE;
		vq->vq_desc_head_idx = start_dp[idx].next;
		vq_update_avail_ring(vq, idx);
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
			break;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);

	return 0;
}

static inline int
virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
				     struct rte_mbuf **cookie, uint16_t num)
{
	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
	uint16_t flags = vq->vq_packed.cached_flags;
	struct virtio_hw *hw = vq->hw;
	struct vq_desc_extra *dxp;
	uint16_t idx;
	int i;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < num))
		return -EMSGSIZE;

	for (i = 0; i < num; i++) {
		idx = vq->vq_avail_idx;
		dxp = &vq->vq_descx[idx];
		dxp->cookie = (void *)cookie[i];
		dxp->ndescs = 1;

		start_dp[idx].addr = cookie[i]->buf_iova +
				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
		start_dp[idx].len = cookie[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;

		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;

		virtqueue_store_flags_packed(&start_dp[idx], flags,
					     hw->weak_barriers);

		if (++vq->vq_avail_idx >= vq->vq_nentries) {
			vq->vq_avail_idx -= vq->vq_nentries;
			vq->vq_packed.cached_flags ^=
				VRING_PACKED_DESC_F_AVAIL_USED;
			flags = vq->vq_packed.cached_flags;
		}
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	return 0;
}

/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
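/* Worked example of the fix-up below: if the stack seeded th->cksum
 * with the checksum of {src addr, dst addr, proto} only, virtio wants
 * the checksum of {src addr, dst addr, proto, L4 length} instead.
 * Adding ip_paylen into the existing value with an end-around carry
 * fold yields exactly that, without recomputing the whole pseudo
 * header.
 */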
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct rte_ipv4_hdr *iph;
		struct rte_ipv6_hdr *ip6h;
		struct rte_tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct rte_ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* fold ip_paylen into the existing phdr checksum */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		/* fold the carry back into the low 16 bits */
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}

static inline void
virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
			struct rte_mbuf **cookies,
			uint16_t num)
{
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_desc *start_dp;
	struct virtio_net_hdr *hdr;
	uint16_t idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	uint16_t i = 0;

	idx = vq->vq_desc_head_idx;
	start_dp = vq->vq_split.ring.desc;

	while (i < num) {
		idx = idx & (vq->vq_nentries - 1);
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
		dxp->cookie = (void *)cookies[i];
		dxp->ndescs = 1;
		virtio_update_packet_stats(&txvq->stats, cookies[i]);

		hdr = rte_pktmbuf_mtod_offset(cookies[i],
				struct virtio_net_hdr *, -head_size);

		/* if offload disabled, hdr is not zeroed yet, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
		else
			virtqueue_xmit_offload(hdr, cookies[i]);

		start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
		start_dp[idx].len = cookies[i]->data_len + head_size;
		start_dp[idx].flags = 0;

		vq_update_avail_ring(vq, idx);

		idx++;
		i++;
	}

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}

static void
virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
				   struct rte_mbuf *cookie,
				   int in_order)
{
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_packed_desc *dp;
	struct vq_desc_extra *dxp;
	uint16_t idx, id, flags;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;

	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
	idx = vq->vq_avail_idx;
	dp = &vq->vq_packed.ring.desc[idx];

	dxp = &vq->vq_descx[id];
	dxp->ndescs = 1;
	dxp->cookie = cookie;

	flags = vq->vq_packed.cached_flags;

	/* prepend cannot fail, checked by caller */
	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
				      -head_size);

	/* if offload disabled, hdr is not zeroed yet, do it now */
	if (!vq->hw->has_tx_offload)
		virtqueue_clear_net_hdr(hdr);
	else
		virtqueue_xmit_offload(hdr, cookie);

	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
	dp->len = cookie->data_len + head_size;
	dp->id = id;

	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx -= vq->vq_nentries;
		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}

	vq->vq_free_cnt--;

	if (!in_order) {
		vq->vq_desc_head_idx = dxp->next;
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
	}
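
	/*
	 * The descriptor flags are written last: with weak_barriers this
	 * is a store-release, otherwise an I/O write barrier precedes the
	 * store, so the device can never observe a descriptor whose
	 * address and length are not yet valid.
	 */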
	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
}

static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
			uint16_t needed, int use_indirect, int can_push,
			int in_order)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	int16_t head_size = vq->hw->vtnet_hdr_size;
	bool prepend_header = false;
	struct virtio_net_hdr *hdr;

	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	if (in_order)
		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
	else
		dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_split.ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
					      -head_size);
		prepend_header = true;

		/* if offload disabled, it is not zeroed below, do it now */
		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	if (vq->hw->has_tx_offload)
		virtqueue_xmit_offload(hdr, cookie);

	do {
		start_dp[idx].addr = rte_mbuf_data_iova(cookie);
		start_dp[idx].len = cookie->data_len;
		if (prepend_header) {
			start_dp[idx].addr -= head_size;
			start_dp[idx].len += head_size;
			prepend_header = false;
		}
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_split.ring.desc[head_idx].next;

	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);

	vq->vq_desc_head_idx = idx;
	vq_update_avail_ring(vq, head_idx);

	if (!in_order) {
		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
			vq->vq_desc_tail_idx = idx;
	}
}

void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq) {
		rte_spinlock_init(&hw->cvq->lock);
		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
	}
}

int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_rx *rxvq;
	uint16_t rx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (rx_conf->rx_deferred_start) {
		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
		return -EINVAL;
	}

	rx_free_thresh = rx_conf->rx_free_thresh;
	if (rx_free_thresh == 0)
		rx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);

	if (rx_free_thresh & 0x3) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	if (rx_free_thresh >= vq->vq_nentries) {
		RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
			"number of RX entries (%u)."
			" (rx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries,
			rx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}
	vq->vq_free_thresh = rx_free_thresh;

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	rxvq = &vq->rxq;
	rxvq->queue_id = queue_idx;
	rxvq->mpool = mp;
	dev->data->rx_queues[queue_idx] = rxvq;

	return 0;
}

int
virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_rx *rxvq = &vq->rxq;
	struct rte_mbuf *m;
	uint16_t desc_idx;
	int error, nbufs = 0, i;
	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

	PMD_INIT_FUNC_TRACE();

	/* Allocate blank mbufs for each Rx descriptor */
	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
		for (desc_idx = 0; desc_idx < vq->vq_nentries;
		     desc_idx++) {
			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
			vq->vq_split.ring.desc[desc_idx].flags =
				VRING_DESC_F_WRITE;
		}

		virtio_rxq_vec_setup(rxvq);
	}
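
	/*
	 * The sw_ring tail is padded with pointers to a zeroed fake mbuf:
	 * the vectorized Rx path may read a few entries past the last
	 * real descriptor of a SIMD batch, and the padding keeps those
	 * over-reads harmless.
	 */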
	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;

	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
			virtio_rxq_rearm_vec(rxvq);
			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
		}
	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
		if (!virtqueue_full(vq)) {
			uint16_t free_cnt = vq->vq_free_cnt;
			struct rte_mbuf *pkts[free_cnt];

			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
				free_cnt)) {
				error = virtqueue_enqueue_refill_inorder(vq,
						pkts, free_cnt);
				if (unlikely(error)) {
					for (i = 0; i < free_cnt; i++)
						rte_pktmbuf_free(pkts[i]);
				}
				nbufs += free_cnt;
			}

			vq_update_avail_idx(vq);
		}
	} else {
		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/* Enqueue allocated buffers */
			if (virtio_with_packed_queue(vq->hw))
				error = virtqueue_enqueue_recv_refill_packed(vq,
						&m, 1);
			else
				error = virtqueue_enqueue_recv_refill(vq,
						&m, 1);
			if (error) {
				rte_mbuf_raw_free(m);
				break;
			}
			nbufs++;
		}

		if (!virtio_with_packed_queue(vq->hw))
			vq_update_avail_idx(vq);
	}

	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

	VIRTQUEUE_DUMP(vq);

	return 0;
}

/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to set up the tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id __rte_unused,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];
	struct virtnet_tx *txvq;
	uint16_t tx_free_thresh;

	PMD_INIT_FUNC_TRACE();

	if (tx_conf->tx_deferred_start) {
		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
		return -EINVAL;
	}

	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
		nb_desc = vq->vq_nentries;
	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);

	txvq = &vq->txq;
	txvq->queue_id = queue_idx;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;
	return 0;
}

int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vq_idx];

	PMD_INIT_FUNC_TRACE();

	if (!virtio_with_packed_queue(hw)) {
		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
	}

	VIRTQUEUE_DUMP(vq);

	return 0;
}

static inline void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	if (virtio_with_packed_queue(vq->hw))
		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
	else
		error = virtqueue_enqueue_recv_refill(vq, &m, 1);

	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

static inline void
virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
	if (unlikely(error)) {
		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}

/* Optionally fill offload information in structure */
static inline int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum) < 0)
				return -EINVAL;
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0)) {
			return -EINVAL;
		}

		/* Update the MSS length in the mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))

uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
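	/*
	 * Trim the burst so it ends at a descriptor cache-line boundary
	 * (likely reasoning: the next burst then starts reading from a
	 * fresh cache line while the device may still be writing used
	 * entries into the tail of the current one).
	 */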
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num ; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	uint16_t num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	struct virtio_net_hdr *hdr;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "dequeue:%d", num);

	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (hw->vlan_strip)
			rte_vlan_strip(rxm);

		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		virtio_rx_stats_updated(rxvq, rxm);

		rx_pkts[nb_rx++] = rxm;
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
						free_cnt) == 0)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_pkts_inorder(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued;
	uint32_t seg_num;
	uint32_t seg_res;
	uint32_t hdr_size;
	uint16_t i;

	nb_rx = 0;
	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);
	nb_used = RTE_MIN(nb_used, nb_pkts);
	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	nb_enqueued = 0;
	seg_num = 1;
	seg_res = 0;
	hdr_size = hw->vtnet_hdr_size;

	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
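
		/*
		 * With VIRTIO_NET_F_MRG_RXBUF the device may spread one
		 * packet over several receive buffers; num_buffers tells
		 * us how many mbufs to chain before the packet is
		 * complete.
		 */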
		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
			seg_num = header->num_buffers;
			if (seg_num == 0)
				seg_num = 1;
		} else {
			seg_num = 1;
		}

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (vq->hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf_inorder(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
							   rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
					"Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t nb_used, num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	uint16_t i;

	if (unlikely(hw->started == 0))
		return nb_rx;

	nb_used = virtqueue_nused(vq);

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) %
				DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)
			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
			 - hdr_size);
		seg_num = header->num_buffers;
		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;

		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);

		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
							 rcv_cnt);
			uint16_t extra_idx = 0;

			rcv_cnt = num;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off =
					RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);
				prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
				extra_idx += 1;
			}
			seg_res -= rcv_cnt;

			if (!seg_res) {
				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
				nb_rx++;
			}
		} else {
			PMD_RX_LOG(ERR,
					"Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
					free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_recv_mergeable_pkts_packed(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
	struct virtio_hw *hw = vq->hw;
	struct rte_mbuf *rxm;
	struct rte_mbuf *prev = NULL;
	uint16_t num, nb_rx = 0;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	uint32_t nb_enqueued = 0;
	uint32_t seg_num = 0;
	uint32_t seg_res = 0;
	uint32_t hdr_size = hw->vtnet_hdr_size;
	uint16_t i;
	int error;

	if (unlikely(hw->started == 0))
		return nb_rx;

	num = nb_pkts;
	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
		num = VIRTIO_MBUF_BURST_SZ;
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);

	for (i = 0; i < num; i++) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		rxm = rcv_pkts[i];

		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
			 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;

		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (hw->has_rx_offload &&
				virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		seg_res = seg_num - 1;

		/* Merge remaining segments */
		while (seg_res != 0 && i < (num - 1)) {
			i++;

			rxm = rcv_pkts[i];
			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[i]);
			rxm->data_len = (uint16_t)(len[i]);

			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);

			prev->next = rxm;
			prev = rxm;
			seg_res -= 1;
		}

		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	/* The last packet may still need more segments merged */
	while (seg_res != 0) {
		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
					VIRTIO_MBUF_BURST_SZ);
		uint16_t extra_idx = 0;

		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
				len, rcv_cnt);
		if (unlikely(rcv_cnt == 0)) {
			PMD_RX_LOG(ERR, "Not enough segments for packet.");
			rte_pktmbuf_free(rx_pkts[nb_rx]);
			rxvq->stats.errors++;
			break;
		}

		while (extra_idx < rcv_cnt) {
			rxm = rcv_pkts[extra_idx];

			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
			rxm->pkt_len = (uint32_t)(len[extra_idx]);
			rxm->data_len = (uint16_t)(len[extra_idx]);

			prev->next = rxm;
			prev = rxm;
			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
			extra_idx += 1;
		}
		seg_res -= rcv_cnt;
		if (!seg_res) {
			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
			nb_rx++;
		}
	}

	rxvq->stats.packets += nb_rx;

	/* Allocate new mbufs for the used descriptors */
	if (likely(!virtqueue_full(vq))) {
		/* free_cnt may include mrg descs */
		uint16_t free_cnt = vq->vq_free_cnt;
		struct rte_mbuf *new_pkts[free_cnt];

		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
			error = virtqueue_enqueue_recv_refill_packed(vq,
					new_pkts, free_cnt);
			if (unlikely(error)) {
				for (i = 0; i < free_cnt; i++)
					rte_pktmbuf_free(new_pkts[i]);
			}
			nb_enqueued += free_cnt;
		} else {
			struct rte_eth_dev *dev =
				&rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed += free_cnt;
		}
	}

	if (likely(nb_enqueued)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}

uint16_t
virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	uint16_t nb_tx;
	int error;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		error = rte_validate_tx_offload(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}
#endif

		/* Do VLAN tag insertion */
		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&m);
			/* rte_vlan_insert() may change the pointer
			 * even in the case of failure
			 */
			tx_pkts[nb_tx] = m;

			if (unlikely(error)) {
				rte_errno = -error;
				break;
			}
		}

		/* virtio, like Intel NICs, expects the L4 checksum field
		 * to be seeded with the pseudo header checksum when a
		 * checksum offload is requested.
		 */
		error = rte_net_intel_cksum_prepare(m);
		if (unlikely(error)) {
			rte_errno = -error;
			break;
		}

		if (m->ol_flags & PKT_TX_TCP_SEG)
			virtio_tso_fix_cksum(m);
	}

	return nb_tx;
}

uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_tx = 0;
	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	if (nb_pkts > vq->vq_free_cnt)
		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
					   in_order);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;
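
		/*
		 * can_push folds the virtio-net header into the mbuf
		 * headroom, so the packet needs one descriptor per
		 * segment; use_indirect spends a single main ring slot
		 * on the whole chain. Both minimize descriptor usage
		 * and thus ring pressure.
		 */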
		/* How many main ring entries are needed for this Tx?
		 * indirect   => 1
		 * any_layout => number of segments
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value means we still need free vring descriptors */
		if (unlikely(need > 0)) {
			virtio_xmit_cleanup_packed(vq, need, in_order);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		if (can_push)
			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
		else
			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
						      use_indirect, 0,
						      in_order);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}

uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);

	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;
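
		/*
		 * Example: a 3-segment mbuf needs 4 slots by default
		 * (one for the header plus one per segment), 3 when the
		 * header can be pushed into the first segment's headroom,
		 * and only 1 with indirect descriptors.
		 */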
		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * indirect   => 1
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value means we still need free vring descriptors */
		if (unlikely(need > 0)) {
			nb_used = virtqueue_nused(vq);

			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					   "No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
			can_push, 0);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}
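
/*
 * Clean up used descriptors and report how many of the "need"
 * descriptors are still missing: a return value greater than zero
 * means the ring is still too full to transmit.
 */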
static __rte_always_inline int
virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
{
	uint16_t nb_used, nb_clean, nb_descs;

	nb_descs = vq->vq_free_cnt + need;
	nb_used = virtqueue_nused(vq);
	nb_clean = RTE_MIN(need, (int)nb_used);

	virtio_xmit_cleanup_inorder(vq, nb_clean);

	return nb_descs - vq->vq_free_cnt;
}

uint16_t
virtio_xmit_pkts_inorder(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
	struct rte_mbuf *inorder_pkts[nb_pkts];
	int need;

	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
		return nb_tx;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	VIRTQUEUE_DUMP(vq);
	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = virtqueue_nused(vq);

	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup_inorder(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int slots;

		/* optimize ring usage */
		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		     rte_mbuf_refcnt_read(txm) == 1 &&
		     RTE_MBUF_DIRECT(txm) &&
		     txm->nb_segs == 1 &&
		     rte_pktmbuf_headroom(txm) >= hdr_size &&
		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
			__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
			inorder_pkts[nb_inorder_pkts] = txm;
			nb_inorder_pkts++;

			continue;
		}

		if (nb_inorder_pkts) {
			need = nb_inorder_pkts - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				need = virtio_xmit_try_cleanup_inorder(vq,
								       need);
				if (unlikely(need > 0)) {
					PMD_TX_LOG(ERR,
						"No free tx descriptors to "
						"transmit");
					break;
				}
			}
			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
						       nb_inorder_pkts);
			nb_inorder_pkts = 0;
		}

		slots = txm->nb_segs + 1;
		need = slots - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq, slots);

			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				break;
			}
		}
		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);

		virtio_update_packet_stats(&txvq->stats, txm);
	}

	/* Transmit all inorder packets */
	if (nb_inorder_pkts) {
		need = nb_inorder_pkts - vq->vq_free_cnt;
		if (unlikely(need > 0)) {
			need = virtio_xmit_try_cleanup_inorder(vq,
							       need);
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				nb_inorder_pkts = vq->vq_free_cnt;
				nb_tx -= need;
			}
		}

		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
					       nb_inorder_pkts);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	VIRTQUEUE_DUMP(vq);

	return nb_tx;
}

__rte_weak uint16_t
virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
			    struct rte_mbuf **rx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}

__rte_weak uint16_t
virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
			    struct rte_mbuf **tx_pkts __rte_unused,
			    uint16_t nb_pkts __rte_unused)
{
	return 0;
}