1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
45 struct virtnet_rx *rxvq = rxq;
46 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
48 return virtqueue_nused(vq) >= offset;
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
54 vq->vq_free_cnt += num;
55 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
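/*
 * In-order free: buffers come back in the same order they were posted,
 * so releasing them only needs to credit vq_free_cnt and move the tail
 * index; no descriptor chain has to be walked or relinked.
 */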
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
61 struct vring_desc *dp, *dp_tail;
62 struct vq_desc_extra *dxp;
63 uint16_t desc_idx_last = desc_idx;
65 dp = &vq->vq_split.ring.desc[desc_idx];
66 dxp = &vq->vq_descx[desc_idx];
67 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 while (dp->flags & VRING_DESC_F_NEXT) {
70 desc_idx_last = dp->next;
71 dp = &vq->vq_split.ring.desc[dp->next];
77 * We must append the newly freed chain to the end of the existing
78 * free chain, if any. If the virtqueue was completely used, then
79 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
81 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 vq->vq_desc_head_idx = desc_idx;
84 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 dp_tail->next = desc_idx;
88 vq->vq_desc_tail_idx = desc_idx_last;
89 dp->next = VQ_RING_DESC_CHAIN_END;
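/*
 * E.g. freeing a chain 5 -> 9 -> 12 leaves desc_idx_last = 12: the chain
 * is hooked after the current free-list tail (or becomes the head if the
 * list was empty), the tail moves to 12, and desc[12].next is reset to
 * VQ_RING_DESC_CHAIN_END.
 */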
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
95 uint32_t s = mbuf->pkt_len;
96 struct rte_ether_addr *ea;
101 stats->size_bins[1]++;
102 } else if (s > 64 && s < 1024) {
105 /* count leading zeros and offset into the correct bin */
106 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107 stats->size_bins[bin]++;
110 stats->size_bins[0]++;
112 stats->size_bins[6]++;
114 stats->size_bins[7]++;
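/*
 * Resulting histogram layout: bin 0 = < 64B, 1 = 64B, 2..5 = 65-127,
 * 128-255, 256-511 and 512-1023B (derived from 32 - clz(len) - 5),
 * 6 = 1024-1518B and 7 = larger frames. E.g. a 300-byte frame has
 * clz(300) = 23, so it lands in bin 32 - 23 - 5 = 4.
 */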
117 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118 if (rte_is_multicast_ether_addr(ea)) {
119 if (rte_is_broadcast_ether_addr(ea))
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
129 VIRTIO_DUMP_PACKET(m, m->data_len);
131 virtio_update_packet_stats(&rxvq->stats, m);
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136 struct rte_mbuf **rx_pkts,
140 struct rte_mbuf *cookie;
143 struct vring_packed_desc *desc;
146 desc = vq->vq_packed.ring.desc;
148 for (i = 0; i < num; i++) {
149 used_idx = vq->vq_used_cons_idx;
150 /* desc_is_used has a load-acquire or rte_io_rmb inside
151 * and waits for a used descriptor in the virtqueue.
153 if (!desc_is_used(&desc[used_idx], vq))
155 len[i] = desc[used_idx].len;
156 id = desc[used_idx].id;
157 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158 if (unlikely(cookie == NULL)) {
159 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160 vq->vq_used_cons_idx);
163 rte_prefetch0(cookie);
164 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
168 vq->vq_used_cons_idx++;
169 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170 vq->vq_used_cons_idx -= vq->vq_nentries;
171 vq->vq_packed.used_wrap_counter ^= 1;
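/*
 * Packed ring consumption: desc_is_used() compares the descriptor's
 * AVAIL/USED flag bits against used_wrap_counter, so flipping the
 * counter each time vq_used_cons_idx wraps past vq_nentries keeps the
 * check valid for the next lap around the ring.
 */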
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180 uint32_t *len, uint16_t num)
182 struct vring_used_elem *uep;
183 struct rte_mbuf *cookie;
184 uint16_t used_idx, desc_idx;
187 /* Caller does the check */
188 for (i = 0; i < num ; i++) {
189 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190 uep = &vq->vq_split.ring.used->ring[used_idx];
191 desc_idx = (uint16_t) uep->id;
193 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
195 if (unlikely(cookie == NULL)) {
196 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197 vq->vq_used_cons_idx);
201 rte_prefetch0(cookie);
202 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
204 vq->vq_used_cons_idx++;
205 vq_ring_free_chain(vq, desc_idx);
206 vq->vq_descx[desc_idx].cookie = NULL;
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214 struct rte_mbuf **rx_pkts,
218 struct vring_used_elem *uep;
219 struct rte_mbuf *cookie;
220 uint16_t used_idx = 0;
223 if (unlikely(num == 0))
226 for (i = 0; i < num; i++) {
227 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228 /* Desc idx same as used idx */
229 uep = &vq->vq_split.ring.used->ring[used_idx];
231 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
233 if (unlikely(cookie == NULL)) {
234 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235 vq->vq_used_cons_idx);
239 rte_prefetch0(cookie);
240 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
242 vq->vq_used_cons_idx++;
243 vq->vq_descx[used_idx].cookie = NULL;
246 vq_ring_free_inorder(vq, used_idx, i);
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252 struct rte_mbuf **cookies,
255 struct vq_desc_extra *dxp;
256 struct virtio_hw *hw = vq->hw;
257 struct vring_desc *start_dp;
258 uint16_t head_idx, idx, i = 0;
260 if (unlikely(vq->vq_free_cnt == 0))
262 if (unlikely(vq->vq_free_cnt < num))
265 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266 start_dp = vq->vq_split.ring.desc;
269 idx = head_idx & (vq->vq_nentries - 1);
270 dxp = &vq->vq_descx[idx];
271 dxp->cookie = (void *)cookies[i];
274 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
275 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
276 start_dp[idx].len = cookies[i]->buf_len -
277 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
278 start_dp[idx].flags = VRING_DESC_F_WRITE;
280 vq_update_avail_ring(vq, idx);
285 vq->vq_desc_head_idx += num;
286 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
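/*
 * The refill address/length above make the descriptor start at the
 * virtio-net header location inside the mbuf headroom: assuming the
 * default 128-byte RTE_PKTMBUF_HEADROOM and, e.g., a 12-byte mergeable
 * header, the device writes the header at buffer offset 116 and the
 * packet data then begins at the mbuf's normal data offset of 128.
 */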
291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
294 struct vq_desc_extra *dxp;
295 struct virtio_hw *hw = vq->hw;
296 struct vring_desc *start_dp = vq->vq_split.ring.desc;
299 if (unlikely(vq->vq_free_cnt == 0))
301 if (unlikely(vq->vq_free_cnt < num))
304 if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
307 for (i = 0; i < num; i++) {
308 idx = vq->vq_desc_head_idx;
309 dxp = &vq->vq_descx[idx];
310 dxp->cookie = (void *)cookie[i];
313 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
314 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
315 start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
317 start_dp[idx].flags = VRING_DESC_F_WRITE;
318 vq->vq_desc_head_idx = start_dp[idx].next;
319 vq_update_avail_ring(vq, idx);
320 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
321 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
326 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
332 virtqueue_refill_single_packed(struct virtqueue *vq,
333 struct vring_packed_desc *dp,
334 struct rte_mbuf *cookie)
336 uint16_t flags = vq->vq_packed.cached_flags;
337 struct virtio_hw *hw = vq->hw;
339 dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
340 dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
342 virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
344 if (++vq->vq_avail_idx >= vq->vq_nentries) {
345 vq->vq_avail_idx -= vq->vq_nentries;
346 vq->vq_packed.cached_flags ^=
347 VRING_PACKED_DESC_F_AVAIL_USED;
348 flags = vq->vq_packed.cached_flags;
353 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
354 struct rte_mbuf **cookie, uint16_t num)
356 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
357 struct vq_desc_extra *dxp;
361 if (unlikely(vq->vq_free_cnt == 0))
363 if (unlikely(vq->vq_free_cnt < num))
366 for (i = 0; i < num; i++) {
367 idx = vq->vq_avail_idx;
368 dxp = &vq->vq_descx[idx];
369 dxp->cookie = (void *)cookie[i];
372 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
374 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
379 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
380 struct rte_mbuf **cookie, uint16_t num)
382 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
383 struct vq_desc_extra *dxp;
387 if (unlikely(vq->vq_free_cnt == 0))
389 if (unlikely(vq->vq_free_cnt < num))
392 for (i = 0; i < num; i++) {
393 idx = vq->vq_avail_idx;
394 did = start_dp[idx].id;
395 dxp = &vq->vq_descx[did];
396 dxp->cookie = (void *)cookie[i];
399 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
401 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
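/*
 * Unlike the _init variant above, the steady-state refill reuses the
 * buffer id already present in the descriptor (did) to index vq_descx,
 * since on a packed ring the device hands buffers back by id rather
 * than by ring position.
 */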
405 /* When doing TSO, the IP length is not included in the pseudo header
406 * checksum of the packet given to the PMD, but for virtio it is
410 virtio_tso_fix_cksum(struct rte_mbuf *m)
412 /* common case: header is not fragmented */
413 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
415 struct rte_ipv4_hdr *iph;
416 struct rte_ipv6_hdr *ip6h;
417 struct rte_tcp_hdr *th;
418 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
421 iph = rte_pktmbuf_mtod_offset(m,
422 struct rte_ipv4_hdr *, m->l2_len);
423 th = RTE_PTR_ADD(iph, m->l3_len);
424 if ((iph->version_ihl >> 4) == 4) {
425 iph->hdr_checksum = 0;
426 iph->hdr_checksum = rte_ipv4_cksum(iph);
427 ip_len = iph->total_length;
428 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
431 ip6h = (struct rte_ipv6_hdr *)iph;
432 ip_paylen = ip6h->payload_len;
435 /* calculate the new phdr checksum not including ip_paylen */
436 prev_cksum = th->cksum;
439 tmp = (tmp & 0xffff) + (tmp >> 16);
442 /* replace it in the packet */
443 th->cksum = new_cksum;
451 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
452 struct rte_mbuf **cookies,
455 struct vq_desc_extra *dxp;
456 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
457 struct vring_desc *start_dp;
458 struct virtio_net_hdr *hdr;
460 int16_t head_size = vq->hw->vtnet_hdr_size;
463 idx = vq->vq_desc_head_idx;
464 start_dp = vq->vq_split.ring.desc;
467 idx = idx & (vq->vq_nentries - 1);
468 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
469 dxp->cookie = (void *)cookies[i];
471 virtio_update_packet_stats(&txvq->stats, cookies[i]);
473 hdr = rte_pktmbuf_mtod_offset(cookies[i],
474 struct virtio_net_hdr *, -head_size);
476 /* if offload disabled, hdr is not zeroed yet, do it now */
477 if (!vq->hw->has_tx_offload)
478 virtqueue_clear_net_hdr(hdr);
480 virtqueue_xmit_offload(hdr, cookies[i]);
482 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
483 start_dp[idx].len = cookies[i]->data_len + head_size;
484 start_dp[idx].flags = 0;
487 vq_update_avail_ring(vq, idx);
493 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
494 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
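/*
 * In-order transmit consumes exactly one descriptor per packet: the
 * virtio-net header is pushed into the mbuf headroom (addr/len are
 * shifted back by head_size), so no separate header descriptor or
 * indirect table is needed and descriptors are used strictly in ring
 * order.
 */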
498 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
499 struct rte_mbuf *cookie,
502 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
503 struct vring_packed_desc *dp;
504 struct vq_desc_extra *dxp;
505 uint16_t idx, id, flags;
506 int16_t head_size = vq->hw->vtnet_hdr_size;
507 struct virtio_net_hdr *hdr;
509 id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
510 idx = vq->vq_avail_idx;
511 dp = &vq->vq_packed.ring.desc[idx];
513 dxp = &vq->vq_descx[id];
515 dxp->cookie = cookie;
517 flags = vq->vq_packed.cached_flags;
519 /* prepend cannot fail, checked by caller */
520 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
523 /* if offload disabled, hdr is not zeroed yet, do it now */
524 if (!vq->hw->has_tx_offload)
525 virtqueue_clear_net_hdr(hdr);
527 virtqueue_xmit_offload(hdr, cookie);
529 dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
530 dp->len = cookie->data_len + head_size;
533 if (++vq->vq_avail_idx >= vq->vq_nentries) {
534 vq->vq_avail_idx -= vq->vq_nentries;
535 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
541 vq->vq_desc_head_idx = dxp->next;
542 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
543 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
546 virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
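/*
 * The descriptor flags are written last, via a store-release (or after a
 * write barrier when weak_barriers is not set), so the device cannot
 * observe the AVAIL flag before the addr/len/id written above are
 * visible.
 */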
550 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
551 uint16_t needed, int use_indirect, int can_push,
554 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
555 struct vq_desc_extra *dxp;
556 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
557 struct vring_desc *start_dp;
558 uint16_t seg_num = cookie->nb_segs;
559 uint16_t head_idx, idx;
560 int16_t head_size = vq->hw->vtnet_hdr_size;
561 bool prepend_header = false;
562 struct virtio_net_hdr *hdr;
564 head_idx = vq->vq_desc_head_idx;
567 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
569 dxp = &vq->vq_descx[idx];
570 dxp->cookie = (void *)cookie;
571 dxp->ndescs = needed;
573 start_dp = vq->vq_split.ring.desc;
576 /* prepend cannot fail, checked by caller */
577 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
579 prepend_header = true;
581 /* if offload disabled, it is not zeroed below, do it now */
582 if (!vq->hw->has_tx_offload)
583 virtqueue_clear_net_hdr(hdr);
584 } else if (use_indirect) {
585 /* setup tx ring slot to point to indirect
586 * descriptor list stored in reserved region.
588 * the first slot in indirect ring is already preset
589 * to point to the header in reserved region
591 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
592 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
593 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
594 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
595 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
597 /* loop below will fill in rest of the indirect elements */
598 start_dp = txr[idx].tx_indir;
601 /* setup first tx ring slot to point to header
602 * stored in reserved region.
604 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
605 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
606 start_dp[idx].len = vq->hw->vtnet_hdr_size;
607 start_dp[idx].flags = VRING_DESC_F_NEXT;
608 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
610 idx = start_dp[idx].next;
613 if (vq->hw->has_tx_offload)
614 virtqueue_xmit_offload(hdr, cookie);
617 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
618 start_dp[idx].len = cookie->data_len;
619 if (prepend_header) {
620 start_dp[idx].addr -= head_size;
621 start_dp[idx].len += head_size;
622 prepend_header = false;
624 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
625 idx = start_dp[idx].next;
626 } while ((cookie = cookie->next) != NULL);
629 idx = vq->vq_split.ring.desc[head_idx].next;
631 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
633 vq->vq_desc_head_idx = idx;
634 vq_update_avail_ring(vq, head_idx);
637 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
638 vq->vq_desc_tail_idx = idx;
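/*
 * Summary of the three split-ring Tx layouts handled above:
 *   can_push     - header prepended in the mbuf headroom, one slot per
 *                  segment.
 *   use_indirect - a single slot pointing at the per-slot indirect table
 *                  in the reserved region, header in its first entry.
 *   default      - one extra slot for the header from the reserved
 *                  region, followed by one slot per segment.
 */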
643 virtio_dev_cq_start(struct rte_eth_dev *dev)
645 struct virtio_hw *hw = dev->data->dev_private;
648 rte_spinlock_init(&hw->cvq->lock);
649 VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
654 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
657 unsigned int socket_id __rte_unused,
658 const struct rte_eth_rxconf *rx_conf,
659 struct rte_mempool *mp)
661 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
662 struct virtio_hw *hw = dev->data->dev_private;
663 struct virtqueue *vq = hw->vqs[vq_idx];
664 struct virtnet_rx *rxvq;
665 uint16_t rx_free_thresh;
669 PMD_INIT_FUNC_TRACE();
671 if (rx_conf->rx_deferred_start) {
672 PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
676 buf_size = virtio_rx_mem_pool_buf_size(mp);
677 if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
678 hw->rx_ol_scatter, &error)) {
679 PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
684 rx_free_thresh = rx_conf->rx_free_thresh;
685 if (rx_free_thresh == 0)
687 RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
689 if (rx_free_thresh & 0x3) {
690 PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
691 " (rx_free_thresh=%u port=%u queue=%u)",
692 rx_free_thresh, dev->data->port_id, queue_idx);
696 if (rx_free_thresh >= vq->vq_nentries) {
697 PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
698 "number of RX entries (%u)."
699 " (rx_free_thresh=%u port=%u queue=%u)",
701 rx_free_thresh, dev->data->port_id, queue_idx);
704 vq->vq_free_thresh = rx_free_thresh;
707 * For the split ring vectorized path, the number of descriptors must be
708 * equal to the ring size.
710 if (nb_desc > vq->vq_nentries ||
711 (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
712 nb_desc = vq->vq_nentries;
714 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
717 rxvq->queue_id = queue_idx;
719 dev->data->rx_queues[queue_idx] = rxvq;
725 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
727 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
728 struct virtio_hw *hw = dev->data->dev_private;
729 struct virtqueue *vq = hw->vqs[vq_idx];
730 struct virtnet_rx *rxvq = &vq->rxq;
734 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
736 PMD_INIT_FUNC_TRACE();
738 /* Allocate blank mbufs for each rx descriptor */
741 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
742 for (desc_idx = 0; desc_idx < vq->vq_nentries;
744 vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
745 vq->vq_split.ring.desc[desc_idx].flags =
749 virtio_rxq_vec_setup(rxvq);
752 memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
753 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
754 vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
756 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
757 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
758 virtio_rxq_rearm_vec(rxvq);
759 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
761 } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
762 if ((!virtqueue_full(vq))) {
763 uint16_t free_cnt = vq->vq_free_cnt;
764 struct rte_mbuf *pkts[free_cnt];
766 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
768 error = virtqueue_enqueue_refill_inorder(vq,
771 if (unlikely(error)) {
772 for (i = 0; i < free_cnt; i++)
773 rte_pktmbuf_free(pkts[i]);
779 vq_update_avail_idx(vq);
782 while (!virtqueue_full(vq)) {
783 m = rte_mbuf_raw_alloc(rxvq->mpool);
787 /* Enqueue allocated buffers */
788 if (virtio_with_packed_queue(vq->hw))
789 error = virtqueue_enqueue_recv_refill_packed_init(vq,
792 error = virtqueue_enqueue_recv_refill(vq,
801 if (!virtio_with_packed_queue(vq->hw))
802 vq_update_avail_idx(vq);
805 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
813 * struct rte_eth_dev *dev: Used to update dev
814 * uint16_t nb_desc: Defaults to values read from config space
815 * unsigned int socket_id: Used to allocate memzone
816 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
817 * uint16_t queue_idx: Just used as an index in dev txq list
820 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
823 unsigned int socket_id __rte_unused,
824 const struct rte_eth_txconf *tx_conf)
826 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
827 struct virtio_hw *hw = dev->data->dev_private;
828 struct virtqueue *vq = hw->vqs[vq_idx];
829 struct virtnet_tx *txvq;
830 uint16_t tx_free_thresh;
832 PMD_INIT_FUNC_TRACE();
834 if (tx_conf->tx_deferred_start) {
835 PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
839 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
840 nb_desc = vq->vq_nentries;
841 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
844 txvq->queue_id = queue_idx;
846 tx_free_thresh = tx_conf->tx_free_thresh;
847 if (tx_free_thresh == 0)
849 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
851 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
852 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
853 "number of TX entries minus 3 (%u)."
854 " (tx_free_thresh=%u port=%u queue=%u)",
856 tx_free_thresh, dev->data->port_id, queue_idx);
860 vq->vq_free_thresh = tx_free_thresh;
862 dev->data->tx_queues[queue_idx] = txvq;
867 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
870 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
871 struct virtio_hw *hw = dev->data->dev_private;
872 struct virtqueue *vq = hw->vqs[vq_idx];
874 PMD_INIT_FUNC_TRACE();
876 if (!virtio_with_packed_queue(hw)) {
877 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
878 vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
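/*
 * For in-order split rings descriptors are consumed sequentially, so the
 * last descriptor's next pointer is looped back to index 0 to turn the
 * free chain into a simple circular list.
 */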
887 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
891 * Requeue the discarded mbuf. This should always be
892 * successful since it was just dequeued.
894 if (virtio_with_packed_queue(vq->hw))
895 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
897 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
899 if (unlikely(error)) {
900 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
906 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
910 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
911 if (unlikely(error)) {
912 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
917 /* Optionally fill offload information in structure */
919 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
921 struct rte_net_hdr_lens hdr_lens;
922 uint32_t hdrlen, ptype;
923 int l4_supported = 0;
926 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
929 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
931 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
932 m->packet_type = ptype;
933 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
934 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
935 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
938 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
939 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
940 if (hdr->csum_start <= hdrlen && l4_supported) {
941 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
943 /* Unknown proto or tunnel, do sw cksum. We can assume
944 * the cksum field is in the first segment since the
945 * buffers we provided to the host are large enough.
946 * In case of SCTP, this will be wrong since it's a CRC
947 * but there's nothing we can do.
949 uint16_t csum = 0, off;
951 if (rte_raw_cksum_mbuf(m, hdr->csum_start,
952 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
955 if (likely(csum != 0xffff))
957 off = hdr->csum_offset + hdr->csum_start;
958 if (rte_pktmbuf_data_len(m) >= off + 1)
959 *rte_pktmbuf_mtod_offset(m, uint16_t *,
962 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
963 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
966 /* GSO request, save required information in mbuf */
967 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
968 /* Check unsupported modes */
969 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
970 (hdr->gso_size == 0)) {
974 /* Update mss length in mbuf */
975 m->tso_segsz = hdr->gso_size;
976 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
977 case VIRTIO_NET_HDR_GSO_TCPV4:
978 case VIRTIO_NET_HDR_GSO_TCPV6:
979 m->ol_flags |= PKT_RX_LRO | \
980 PKT_RX_L4_CKSUM_NONE;
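/*
 * Net effect of the translation above: VIRTIO_NET_HDR_F_NEEDS_CSUM maps
 * to an unverified L4 checksum (completed in software when the protocol
 * is not recognised), VIRTIO_NET_HDR_F_DATA_VALID maps to
 * PKT_RX_L4_CKSUM_GOOD, and a GSO type other than NONE is surfaced as
 * PKT_RX_LRO with tso_segsz taken from gso_size.
 */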
990 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
992 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
994 struct virtnet_rx *rxvq = rx_queue;
995 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
996 struct virtio_hw *hw = vq->hw;
997 struct rte_mbuf *rxm;
998 uint16_t nb_used, num, nb_rx;
999 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1000 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1002 uint32_t i, nb_enqueued;
1004 struct virtio_net_hdr *hdr;
1007 if (unlikely(hw->started == 0))
1010 nb_used = virtqueue_nused(vq);
1012 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1013 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1014 num = VIRTIO_MBUF_BURST_SZ;
1015 if (likely(num > DESC_PER_CACHELINE))
1016 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
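/* Trim the burst so the consumer index ends on a descriptor cache line
 * boundary: assuming 64-byte cache lines and 16-byte descriptors,
 * DESC_PER_CACHELINE is 4, so e.g. used_cons_idx = 6 and num = 32 is
 * trimmed to 30, leaving the next dequeue to start on a fresh line.
 */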
1018 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1019 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1022 hdr_size = hw->vtnet_hdr_size;
1024 for (i = 0; i < num ; i++) {
1027 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1029 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1030 PMD_RX_LOG(ERR, "Packet drop");
1032 virtio_discard_rxbuf(vq, rxm);
1033 rxvq->stats.errors++;
1037 rxm->port = rxvq->port_id;
1038 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1042 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1043 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1045 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1046 RTE_PKTMBUF_HEADROOM - hdr_size);
1049 rte_vlan_strip(rxm);
1051 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1052 virtio_discard_rxbuf(vq, rxm);
1053 rxvq->stats.errors++;
1057 virtio_rx_stats_updated(rxvq, rxm);
1059 rx_pkts[nb_rx++] = rxm;
1062 rxvq->stats.packets += nb_rx;
1064 /* Allocate new mbufs for the used descriptors */
1065 if (likely(!virtqueue_full(vq))) {
1066 uint16_t free_cnt = vq->vq_free_cnt;
1067 struct rte_mbuf *new_pkts[free_cnt];
1069 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1071 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1073 if (unlikely(error)) {
1074 for (i = 0; i < free_cnt; i++)
1075 rte_pktmbuf_free(new_pkts[i]);
1077 nb_enqueued += free_cnt;
1079 struct rte_eth_dev *dev =
1080 &rte_eth_devices[rxvq->port_id];
1081 dev->data->rx_mbuf_alloc_failed += free_cnt;
1085 if (likely(nb_enqueued)) {
1086 vq_update_avail_idx(vq);
1088 if (unlikely(virtqueue_kick_prepare(vq))) {
1089 virtqueue_notify(vq);
1090 PMD_RX_LOG(DEBUG, "Notified");
1098 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1101 struct virtnet_rx *rxvq = rx_queue;
1102 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1103 struct virtio_hw *hw = vq->hw;
1104 struct rte_mbuf *rxm;
1105 uint16_t num, nb_rx;
1106 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1107 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1109 uint32_t i, nb_enqueued;
1111 struct virtio_net_hdr *hdr;
1114 if (unlikely(hw->started == 0))
1117 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1118 if (likely(num > DESC_PER_CACHELINE))
1119 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1121 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1122 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1125 hdr_size = hw->vtnet_hdr_size;
1127 for (i = 0; i < num; i++) {
1130 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1132 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1133 PMD_RX_LOG(ERR, "Packet drop");
1135 virtio_discard_rxbuf(vq, rxm);
1136 rxvq->stats.errors++;
1140 rxm->port = rxvq->port_id;
1141 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1145 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1146 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1148 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1149 RTE_PKTMBUF_HEADROOM - hdr_size);
1152 rte_vlan_strip(rxm);
1154 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1155 virtio_discard_rxbuf(vq, rxm);
1156 rxvq->stats.errors++;
1160 virtio_rx_stats_updated(rxvq, rxm);
1162 rx_pkts[nb_rx++] = rxm;
1165 rxvq->stats.packets += nb_rx;
1167 /* Allocate new mbufs for the used descriptors */
1168 if (likely(!virtqueue_full(vq))) {
1169 uint16_t free_cnt = vq->vq_free_cnt;
1170 struct rte_mbuf *new_pkts[free_cnt];
1172 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1174 error = virtqueue_enqueue_recv_refill_packed(vq,
1175 new_pkts, free_cnt);
1176 if (unlikely(error)) {
1177 for (i = 0; i < free_cnt; i++)
1178 rte_pktmbuf_free(new_pkts[i]);
1180 nb_enqueued += free_cnt;
1182 struct rte_eth_dev *dev =
1183 &rte_eth_devices[rxvq->port_id];
1184 dev->data->rx_mbuf_alloc_failed += free_cnt;
1188 if (likely(nb_enqueued)) {
1189 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1190 virtqueue_notify(vq);
1191 PMD_RX_LOG(DEBUG, "Notified");
1200 virtio_recv_pkts_inorder(void *rx_queue,
1201 struct rte_mbuf **rx_pkts,
1204 struct virtnet_rx *rxvq = rx_queue;
1205 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1206 struct virtio_hw *hw = vq->hw;
1207 struct rte_mbuf *rxm;
1208 struct rte_mbuf *prev = NULL;
1209 uint16_t nb_used, num, nb_rx;
1210 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1211 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1213 uint32_t nb_enqueued;
1220 if (unlikely(hw->started == 0))
1223 nb_used = virtqueue_nused(vq);
1224 nb_used = RTE_MIN(nb_used, nb_pkts);
1225 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1227 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1232 hdr_size = hw->vtnet_hdr_size;
1234 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1236 for (i = 0; i < num; i++) {
1237 struct virtio_net_hdr_mrg_rxbuf *header;
1239 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1240 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1244 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1245 PMD_RX_LOG(ERR, "Packet drop");
1247 virtio_discard_rxbuf_inorder(vq, rxm);
1248 rxvq->stats.errors++;
1252 header = (struct virtio_net_hdr_mrg_rxbuf *)
1253 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1256 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1257 seg_num = header->num_buffers;
1264 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1265 rxm->nb_segs = seg_num;
1268 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1269 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1271 rxm->port = rxvq->port_id;
1273 rx_pkts[nb_rx] = rxm;
1276 if (vq->hw->has_rx_offload &&
1277 virtio_rx_offload(rxm, &header->hdr) < 0) {
1278 virtio_discard_rxbuf_inorder(vq, rxm);
1279 rxvq->stats.errors++;
1284 rte_vlan_strip(rx_pkts[nb_rx]);
1286 seg_res = seg_num - 1;
1288 /* Merge remaining segments */
1289 while (seg_res != 0 && i < (num - 1)) {
1293 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1294 rxm->pkt_len = (uint32_t)(len[i]);
1295 rxm->data_len = (uint16_t)(len[i]);
1297 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1305 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1310 /* Last packet may still need remaining segments merged */
1311 while (seg_res != 0) {
1312 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1313 VIRTIO_MBUF_BURST_SZ);
1315 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1316 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1318 uint16_t extra_idx = 0;
1321 while (extra_idx < rcv_cnt) {
1322 rxm = rcv_pkts[extra_idx];
1324 RTE_PKTMBUF_HEADROOM - hdr_size;
1325 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1326 rxm->data_len = (uint16_t)(len[extra_idx]);
1329 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1335 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1340 "No enough segments for packet.");
1341 rte_pktmbuf_free(rx_pkts[nb_rx]);
1342 rxvq->stats.errors++;
1347 rxvq->stats.packets += nb_rx;
1349 /* Allocate new mbufs for the used descriptors */
1351 if (likely(!virtqueue_full(vq))) {
1352 /* free_cnt may include mrg descs */
1353 uint16_t free_cnt = vq->vq_free_cnt;
1354 struct rte_mbuf *new_pkts[free_cnt];
1356 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1357 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1359 if (unlikely(error)) {
1360 for (i = 0; i < free_cnt; i++)
1361 rte_pktmbuf_free(new_pkts[i]);
1363 nb_enqueued += free_cnt;
1365 struct rte_eth_dev *dev =
1366 &rte_eth_devices[rxvq->port_id];
1367 dev->data->rx_mbuf_alloc_failed += free_cnt;
1371 if (likely(nb_enqueued)) {
1372 vq_update_avail_idx(vq);
1374 if (unlikely(virtqueue_kick_prepare(vq))) {
1375 virtqueue_notify(vq);
1376 PMD_RX_LOG(DEBUG, "Notified");
1384 virtio_recv_mergeable_pkts(void *rx_queue,
1385 struct rte_mbuf **rx_pkts,
1388 struct virtnet_rx *rxvq = rx_queue;
1389 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1390 struct virtio_hw *hw = vq->hw;
1391 struct rte_mbuf *rxm;
1392 struct rte_mbuf *prev = NULL;
1393 uint16_t nb_used, num, nb_rx = 0;
1394 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1395 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1397 uint32_t nb_enqueued = 0;
1398 uint32_t seg_num = 0;
1399 uint32_t seg_res = 0;
1400 uint32_t hdr_size = hw->vtnet_hdr_size;
1403 if (unlikely(hw->started == 0))
1406 nb_used = virtqueue_nused(vq);
1408 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1410 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1411 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1412 num = VIRTIO_MBUF_BURST_SZ;
1413 if (likely(num > DESC_PER_CACHELINE))
1414 num = num - ((vq->vq_used_cons_idx + num) %
1415 DESC_PER_CACHELINE);
1418 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1420 for (i = 0; i < num; i++) {
1421 struct virtio_net_hdr_mrg_rxbuf *header;
1423 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1424 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1428 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1429 PMD_RX_LOG(ERR, "Packet drop");
1431 virtio_discard_rxbuf(vq, rxm);
1432 rxvq->stats.errors++;
1436 header = (struct virtio_net_hdr_mrg_rxbuf *)
1437 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1439 seg_num = header->num_buffers;
1443 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1444 rxm->nb_segs = seg_num;
1447 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1448 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1450 rxm->port = rxvq->port_id;
1452 rx_pkts[nb_rx] = rxm;
1455 if (hw->has_rx_offload &&
1456 virtio_rx_offload(rxm, &header->hdr) < 0) {
1457 virtio_discard_rxbuf(vq, rxm);
1458 rxvq->stats.errors++;
1463 rte_vlan_strip(rx_pkts[nb_rx]);
1465 seg_res = seg_num - 1;
1467 /* Merge remaining segments */
1468 while (seg_res != 0 && i < (num - 1)) {
1472 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1473 rxm->pkt_len = (uint32_t)(len[i]);
1474 rxm->data_len = (uint16_t)(len[i]);
1476 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1484 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1489 /* Last packet may still need remaining segments merged */
1490 while (seg_res != 0) {
1491 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1492 VIRTIO_MBUF_BURST_SZ);
1494 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1495 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1497 uint16_t extra_idx = 0;
1500 while (extra_idx < rcv_cnt) {
1501 rxm = rcv_pkts[extra_idx];
1503 RTE_PKTMBUF_HEADROOM - hdr_size;
1504 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1505 rxm->data_len = (uint16_t)(len[extra_idx]);
1508 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1514 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1519 "No enough segments for packet.");
1520 rte_pktmbuf_free(rx_pkts[nb_rx]);
1521 rxvq->stats.errors++;
1526 rxvq->stats.packets += nb_rx;
1528 /* Allocate new mbufs for the used descriptors */
1529 if (likely(!virtqueue_full(vq))) {
1530 /* free_cnt may include mrg descs */
1531 uint16_t free_cnt = vq->vq_free_cnt;
1532 struct rte_mbuf *new_pkts[free_cnt];
1534 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1535 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1537 if (unlikely(error)) {
1538 for (i = 0; i < free_cnt; i++)
1539 rte_pktmbuf_free(new_pkts[i]);
1541 nb_enqueued += free_cnt;
1543 struct rte_eth_dev *dev =
1544 &rte_eth_devices[rxvq->port_id];
1545 dev->data->rx_mbuf_alloc_failed += free_cnt;
1549 if (likely(nb_enqueued)) {
1550 vq_update_avail_idx(vq);
1552 if (unlikely(virtqueue_kick_prepare(vq))) {
1553 virtqueue_notify(vq);
1554 PMD_RX_LOG(DEBUG, "Notified");
1562 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1563 struct rte_mbuf **rx_pkts,
1566 struct virtnet_rx *rxvq = rx_queue;
1567 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1568 struct virtio_hw *hw = vq->hw;
1569 struct rte_mbuf *rxm;
1570 struct rte_mbuf *prev = NULL;
1571 uint16_t num, nb_rx = 0;
1572 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1573 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1574 uint32_t nb_enqueued = 0;
1575 uint32_t seg_num = 0;
1576 uint32_t seg_res = 0;
1577 uint32_t hdr_size = hw->vtnet_hdr_size;
1581 if (unlikely(hw->started == 0))
1586 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1587 num = VIRTIO_MBUF_BURST_SZ;
1588 if (likely(num > DESC_PER_CACHELINE))
1589 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1591 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1593 for (i = 0; i < num; i++) {
1594 struct virtio_net_hdr_mrg_rxbuf *header;
1596 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1597 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1601 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1602 PMD_RX_LOG(ERR, "Packet drop");
1604 virtio_discard_rxbuf(vq, rxm);
1605 rxvq->stats.errors++;
1609 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1610 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1611 seg_num = header->num_buffers;
1616 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1617 rxm->nb_segs = seg_num;
1620 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1621 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1623 rxm->port = rxvq->port_id;
1624 rx_pkts[nb_rx] = rxm;
1627 if (hw->has_rx_offload &&
1628 virtio_rx_offload(rxm, &header->hdr) < 0) {
1629 virtio_discard_rxbuf(vq, rxm);
1630 rxvq->stats.errors++;
1635 rte_vlan_strip(rx_pkts[nb_rx]);
1637 seg_res = seg_num - 1;
1639 /* Merge remaining segments */
1640 while (seg_res != 0 && i < (num - 1)) {
1644 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1645 rxm->pkt_len = (uint32_t)(len[i]);
1646 rxm->data_len = (uint16_t)(len[i]);
1648 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1656 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1661 /* Last packet may still need remaining segments merged */
1662 while (seg_res != 0) {
1663 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1664 VIRTIO_MBUF_BURST_SZ);
1665 uint16_t extra_idx = 0;
1667 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1669 if (unlikely(rcv_cnt == 0)) {
1670 PMD_RX_LOG(ERR, "Not enough segments for packet.");
1671 rte_pktmbuf_free(rx_pkts[nb_rx]);
1672 rxvq->stats.errors++;
1676 while (extra_idx < rcv_cnt) {
1677 rxm = rcv_pkts[extra_idx];
1679 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1680 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1681 rxm->data_len = (uint16_t)(len[extra_idx]);
1685 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1690 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1695 rxvq->stats.packets += nb_rx;
1697 /* Allocate new mbufs for the used descriptors */
1698 if (likely(!virtqueue_full(vq))) {
1699 /* free_cnt may include mrg descs */
1700 uint16_t free_cnt = vq->vq_free_cnt;
1701 struct rte_mbuf *new_pkts[free_cnt];
1703 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1704 error = virtqueue_enqueue_recv_refill_packed(vq,
1705 new_pkts, free_cnt);
1706 if (unlikely(error)) {
1707 for (i = 0; i < free_cnt; i++)
1708 rte_pktmbuf_free(new_pkts[i]);
1710 nb_enqueued += free_cnt;
1712 struct rte_eth_dev *dev =
1713 &rte_eth_devices[rxvq->port_id];
1714 dev->data->rx_mbuf_alloc_failed += free_cnt;
1718 if (likely(nb_enqueued)) {
1719 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1720 virtqueue_notify(vq);
1721 PMD_RX_LOG(DEBUG, "Notified");
1729 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1735 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1736 struct rte_mbuf *m = tx_pkts[nb_tx];
1738 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1739 error = rte_validate_tx_offload(m);
1740 if (unlikely(error)) {
1746 /* Do VLAN tag insertion */
1747 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1748 error = rte_vlan_insert(&m);
1749 /* rte_vlan_insert() may change pointer
1750 * even in the case of failure
1754 if (unlikely(error)) {
1760 error = rte_net_intel_cksum_prepare(m);
1761 if (unlikely(error)) {
1766 if (m->ol_flags & PKT_TX_TCP_SEG)
1767 virtio_tso_fix_cksum(m);
1774 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1777 struct virtnet_tx *txvq = tx_queue;
1778 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1779 struct virtio_hw *hw = vq->hw;
1780 uint16_t hdr_size = hw->vtnet_hdr_size;
1782 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1784 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1787 if (unlikely(nb_pkts < 1))
1790 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1792 if (nb_pkts > vq->vq_free_cnt)
1793 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1796 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1797 struct rte_mbuf *txm = tx_pkts[nb_tx];
1798 int can_push = 0, use_indirect = 0, slots, need;
1800 /* optimize ring usage */
1801 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1802 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1803 rte_mbuf_refcnt_read(txm) == 1 &&
1804 RTE_MBUF_DIRECT(txm) &&
1805 txm->nb_segs == 1 &&
1806 rte_pktmbuf_headroom(txm) >= hdr_size &&
1807 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1808 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1810 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1811 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1813 /* How many main ring entries are needed for this Tx?
1815 * any_layout => number of segments
1816 * default => number of segments + 1
1818 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
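/* e.g. a 3-segment mbuf takes 4 slots in the default layout, only 1
 * when an indirect table is used, and a single-segment mbuf whose
 * header fits in the headroom (can_push) takes exactly 1 slot.
 */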
1819 need = slots - vq->vq_free_cnt;
1821 /* A positive value indicates that free vring descriptors are needed */
1822 if (unlikely(need > 0)) {
1823 virtio_xmit_cleanup_packed(vq, need, in_order);
1824 need = slots - vq->vq_free_cnt;
1825 if (unlikely(need > 0)) {
1827 "No free tx descriptors to transmit");
1832 /* Enqueue Packet buffers */
1834 virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1836 virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1840 virtio_update_packet_stats(&txvq->stats, txm);
1843 txvq->stats.packets += nb_tx;
1845 if (likely(nb_tx)) {
1846 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1847 virtqueue_notify(vq);
1848 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1856 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1858 struct virtnet_tx *txvq = tx_queue;
1859 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1860 struct virtio_hw *hw = vq->hw;
1861 uint16_t hdr_size = hw->vtnet_hdr_size;
1862 uint16_t nb_used, nb_tx = 0;
1864 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1867 if (unlikely(nb_pkts < 1))
1870 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1872 nb_used = virtqueue_nused(vq);
1874 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1875 virtio_xmit_cleanup(vq, nb_used);
1877 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1878 struct rte_mbuf *txm = tx_pkts[nb_tx];
1879 int can_push = 0, use_indirect = 0, slots, need;
1881 /* optimize ring usage */
1882 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1883 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1884 rte_mbuf_refcnt_read(txm) == 1 &&
1885 RTE_MBUF_DIRECT(txm) &&
1886 txm->nb_segs == 1 &&
1887 rte_pktmbuf_headroom(txm) >= hdr_size &&
1888 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1889 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1891 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1892 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1895 /* How many main ring entries are needed for this Tx?
1896 * any_layout => number of segments
1898 * default => number of segments + 1
1900 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1901 need = slots - vq->vq_free_cnt;
1903 /* A positive value indicates that free vring descriptors are needed */
1904 if (unlikely(need > 0)) {
1905 nb_used = virtqueue_nused(vq);
1907 need = RTE_MIN(need, (int)nb_used);
1909 virtio_xmit_cleanup(vq, need);
1910 need = slots - vq->vq_free_cnt;
1911 if (unlikely(need > 0)) {
1913 "No free tx descriptors to transmit");
1918 /* Enqueue Packet buffers */
1919 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1922 virtio_update_packet_stats(&txvq->stats, txm);
1925 txvq->stats.packets += nb_tx;
1927 if (likely(nb_tx)) {
1928 vq_update_avail_idx(vq);
1930 if (unlikely(virtqueue_kick_prepare(vq))) {
1931 virtqueue_notify(vq);
1932 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1939 static __rte_always_inline int
1940 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1942 uint16_t nb_used, nb_clean, nb_descs;
1944 nb_descs = vq->vq_free_cnt + need;
1945 nb_used = virtqueue_nused(vq);
1946 nb_clean = RTE_MIN(need, (int)nb_used);
1948 virtio_xmit_cleanup_inorder(vq, nb_clean);
1950 return nb_descs - vq->vq_free_cnt;
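/*
 * Returns how many descriptors are still missing after the cleanup pass:
 * a value <= 0 means the request can now be satisfied, while a positive
 * value is the remaining shortfall the caller reports as a Tx descriptor
 * shortage.
 */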
1954 virtio_xmit_pkts_inorder(void *tx_queue,
1955 struct rte_mbuf **tx_pkts,
1958 struct virtnet_tx *txvq = tx_queue;
1959 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1960 struct virtio_hw *hw = vq->hw;
1961 uint16_t hdr_size = hw->vtnet_hdr_size;
1962 uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1963 struct rte_mbuf *inorder_pkts[nb_pkts];
1966 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1969 if (unlikely(nb_pkts < 1))
1973 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1974 nb_used = virtqueue_nused(vq);
1976 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1977 virtio_xmit_cleanup_inorder(vq, nb_used);
1979 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1980 struct rte_mbuf *txm = tx_pkts[nb_tx];
1983 /* optimize ring usage */
1984 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1985 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1986 rte_mbuf_refcnt_read(txm) == 1 &&
1987 RTE_MBUF_DIRECT(txm) &&
1988 txm->nb_segs == 1 &&
1989 rte_pktmbuf_headroom(txm) >= hdr_size &&
1990 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1991 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1992 inorder_pkts[nb_inorder_pkts] = txm;
1998 if (nb_inorder_pkts) {
1999 need = nb_inorder_pkts - vq->vq_free_cnt;
2000 if (unlikely(need > 0)) {
2001 need = virtio_xmit_try_cleanup_inorder(vq,
2003 if (unlikely(need > 0)) {
2005 "No free tx descriptors to "
2010 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2012 nb_inorder_pkts = 0;
2015 slots = txm->nb_segs + 1;
2016 need = slots - vq->vq_free_cnt;
2017 if (unlikely(need > 0)) {
2018 need = virtio_xmit_try_cleanup_inorder(vq, slots);
2020 if (unlikely(need > 0)) {
2022 "No free tx descriptors to transmit");
2026 /* Enqueue Packet buffers */
2027 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2029 virtio_update_packet_stats(&txvq->stats, txm);
2032 /* Transmit all inorder packets */
2033 if (nb_inorder_pkts) {
2034 need = nb_inorder_pkts - vq->vq_free_cnt;
2035 if (unlikely(need > 0)) {
2036 need = virtio_xmit_try_cleanup_inorder(vq,
2038 if (unlikely(need > 0)) {
2040 "No free tx descriptors to transmit");
2041 nb_inorder_pkts = vq->vq_free_cnt;
2046 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2050 txvq->stats.packets += nb_tx;
2052 if (likely(nb_tx)) {
2053 vq_update_avail_idx(vq);
2055 if (unlikely(virtqueue_kick_prepare(vq))) {
2056 virtqueue_notify(vq);
2057 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
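/*
 * The in-order Tx path batches packets whose header fits in the mbuf
 * headroom into inorder_pkts and flushes them with
 * virtqueue_enqueue_xmit_inorder(); anything else (multi-segment or
 * unaligned mbufs) falls back to virtqueue_enqueue_xmit() with a
 * separate header descriptor, which still keeps ring order intact.
 */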
2067 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2068 struct rte_mbuf **rx_pkts __rte_unused,
2069 uint16_t nb_pkts __rte_unused)
2075 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2076 struct rte_mbuf **tx_pkts __rte_unused,
2077 uint16_t nb_pkts __rte_unused)