1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
35 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
36 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
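/*
 * Report whether at least @offset used entries are pending on the RX
 * virtqueue, i.e. whether the descriptor at that offset has already been
 * filled by the host.
 */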
42 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 struct virtnet_rx *rxvq = rxq;
45 struct virtqueue *vq = rxvq->vq;
47 return VIRTQUEUE_NUSED(vq) >= offset;
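/*
 * In-order descriptor free: descriptors are consumed sequentially, so
 * reclaiming @num of them only needs to bump the free counter and move the
 * tail index to the last freed slot.
 */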
51 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 vq->vq_free_cnt += num;
54 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
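/*
 * Return a (possibly chained) descriptor to the free list: walk the
 * VRING_DESC_F_NEXT links to find the end of the chain, then hook the
 * freed chain onto the tail of the existing free list (or make it the
 * free list if the ring was completely used).
 */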
58 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 struct vring_desc *dp, *dp_tail;
61 struct vq_desc_extra *dxp;
62 uint16_t desc_idx_last = desc_idx;
64 dp = &vq->vq_ring.desc[desc_idx];
65 dxp = &vq->vq_descx[desc_idx];
66 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
67 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
68 while (dp->flags & VRING_DESC_F_NEXT) {
69 desc_idx_last = dp->next;
70 dp = &vq->vq_ring.desc[dp->next];
76 * We must append the existing free chain, if any, to the end of the
77 * newly freed chain. If the virtqueue was completely used, then
78 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
81 vq->vq_desc_head_idx = desc_idx;
83 dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
84 dp_tail->next = desc_idx;
87 vq->vq_desc_tail_idx = desc_idx_last;
88 dp->next = VQ_RING_DESC_CHAIN_END;
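/*
 * Dequeue up to @num received buffers from the used ring: map each used
 * element back to its mbuf cookie, record the buffer length in @len and
 * release the descriptor chain. Returns the number of mbufs dequeued.
 */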
92 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
93 uint32_t *len, uint16_t num)
95 struct vring_used_elem *uep;
96 struct rte_mbuf *cookie;
97 uint16_t used_idx, desc_idx;
100 /* Caller does the check */
101 for (i = 0; i < num ; i++) {
102 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
103 uep = &vq->vq_ring.used->ring[used_idx];
104 desc_idx = (uint16_t) uep->id;
106 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
108 if (unlikely(cookie == NULL)) {
109 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
110 vq->vq_used_cons_idx);
114 rte_prefetch0(cookie);
115 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
117 vq->vq_used_cons_idx++;
118 vq_ring_free_chain(vq, desc_idx);
119 vq->vq_descx[desc_idx].cookie = NULL;
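/*
 * In-order RX dequeue: the descriptor index equals the used-ring index,
 * so all consumed slots can be reclaimed with a single call to
 * vq_ring_free_inorder() after the loop.
 */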
126 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
127 struct rte_mbuf **rx_pkts,
131 struct vring_used_elem *uep;
132 struct rte_mbuf *cookie;
133 uint16_t used_idx = 0;
136 if (unlikely(num == 0))
139 for (i = 0; i < num; i++) {
140 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
141 /* Desc idx same as used idx */
142 uep = &vq->vq_ring.used->ring[used_idx];
144 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
146 if (unlikely(cookie == NULL)) {
147 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
148 vq->vq_used_cons_idx);
152 rte_prefetch0(cookie);
153 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
155 vq->vq_used_cons_idx++;
156 vq->vq_descx[used_idx].cookie = NULL;
159 vq_ring_free_inorder(vq, used_idx, i);
163 #ifndef DEFAULT_TX_FREE_THRESH
164 #define DEFAULT_TX_FREE_THRESH 32
167 /* Cleanup from completed transmits. */
169 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
171 uint16_t i, used_idx, desc_idx;
172 for (i = 0; i < num; i++) {
173 struct vring_used_elem *uep;
174 struct vq_desc_extra *dxp;
176 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
177 uep = &vq->vq_ring.used->ring[used_idx];
179 desc_idx = (uint16_t) uep->id;
180 dxp = &vq->vq_descx[desc_idx];
181 vq->vq_used_cons_idx++;
182 vq_ring_free_chain(vq, desc_idx);
184 if (dxp->cookie != NULL) {
185 rte_pktmbuf_free(dxp->cookie);
191 /* Cleanup from completed inorder transmits. */
193 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
195 uint16_t i, used_idx, desc_idx = 0, last_idx;
196 int16_t free_cnt = 0;
197 struct vq_desc_extra *dxp = NULL;
199 if (unlikely(num == 0))
202 for (i = 0; i < num; i++) {
203 struct vring_used_elem *uep;
205 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
206 uep = &vq->vq_ring.used->ring[used_idx];
207 desc_idx = (uint16_t)uep->id;
209 dxp = &vq->vq_descx[desc_idx];
210 vq->vq_used_cons_idx++;
212 if (dxp->cookie != NULL) {
213 rte_pktmbuf_free(dxp->cookie);
218 last_idx = desc_idx + dxp->ndescs - 1;
219 free_cnt = last_idx - vq->vq_desc_tail_idx;
221 free_cnt += vq->vq_nentries;
223 vq_ring_free_inorder(vq, last_idx, free_cnt);
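/*
 * Refill the RX virtqueue with @num mbufs in order: descriptors are taken
 * sequentially starting at the head index, and each buffer is posted
 * writable with room reserved in front of the data for the virtio-net
 * header.
 */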
227 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
228 struct rte_mbuf **cookies,
231 struct vq_desc_extra *dxp;
232 struct virtio_hw *hw = vq->hw;
233 struct vring_desc *start_dp;
234 uint16_t head_idx, idx, i = 0;
236 if (unlikely(vq->vq_free_cnt == 0))
238 if (unlikely(vq->vq_free_cnt < num))
241 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
242 start_dp = vq->vq_ring.desc;
245 idx = head_idx & (vq->vq_nentries - 1);
246 dxp = &vq->vq_descx[idx];
247 dxp->cookie = (void *)cookies[i];
251 VIRTIO_MBUF_ADDR(cookies[i], vq) +
252 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
254 cookies[i]->buf_len -
255 RTE_PKTMBUF_HEADROOM +
257 start_dp[idx].flags = VRING_DESC_F_WRITE;
259 vq_update_avail_ring(vq, idx);
264 vq->vq_desc_head_idx += num;
265 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
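/*
 * Add a single mbuf to the RX virtqueue (default, non in-order path). The
 * buffer is posted writable and starts vtnet_hdr_size bytes before the
 * mbuf data area so the host can prepend the virtio-net header.
 */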
270 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
272 struct vq_desc_extra *dxp;
273 struct virtio_hw *hw = vq->hw;
274 struct vring_desc *start_dp;
276 uint16_t head_idx, idx;
278 if (unlikely(vq->vq_free_cnt == 0))
280 if (unlikely(vq->vq_free_cnt < needed))
283 head_idx = vq->vq_desc_head_idx;
284 if (unlikely(head_idx >= vq->vq_nentries))
288 dxp = &vq->vq_descx[idx];
289 dxp->cookie = (void *)cookie;
290 dxp->ndescs = needed;
292 start_dp = vq->vq_ring.desc;
294 VIRTIO_MBUF_ADDR(cookie, vq) +
295 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
297 cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
298 start_dp[idx].flags = VRING_DESC_F_WRITE;
299 idx = start_dp[idx].next;
300 vq->vq_desc_head_idx = idx;
301 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
302 vq->vq_desc_tail_idx = idx;
303 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
304 vq_update_avail_ring(vq, head_idx);
309 /* When doing TSO, the IP length is not included in the pseudo header
310 * checksum of the packet given to the PMD, but for virtio it is expected.
314 virtio_tso_fix_cksum(struct rte_mbuf *m)
316 /* common case: header is not fragmented */
317 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
319 struct ipv4_hdr *iph;
320 struct ipv6_hdr *ip6h;
322 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
325 iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
326 th = RTE_PTR_ADD(iph, m->l3_len);
327 if ((iph->version_ihl >> 4) == 4) {
328 iph->hdr_checksum = 0;
329 iph->hdr_checksum = rte_ipv4_cksum(iph);
330 ip_len = iph->total_length;
331 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
334 ip6h = (struct ipv6_hdr *)iph;
335 ip_paylen = ip6h->payload_len;
338 /* calculate the new phdr checksum not including ip_paylen */
339 prev_cksum = th->cksum;
342 tmp = (tmp & 0xffff) + (tmp >> 16);
345 /* replace it in the packet */
346 th->cksum = new_cksum;
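/*
 * TX checksum/TSO offload can be used only when the host accepted the
 * corresponding virtio feature bits.
 */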
351 tx_offload_enabled(struct virtio_hw *hw)
353 return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
354 vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
355 vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
358 /* avoid the write operation when it is not needed, to lessen cache issues */
359 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
360 if ((var) != (val)) \
365 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
366 struct rte_mbuf *cookie,
370 if (cookie->ol_flags & PKT_TX_TCP_SEG)
371 cookie->ol_flags |= PKT_TX_TCP_CKSUM;
373 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
374 case PKT_TX_UDP_CKSUM:
375 hdr->csum_start = cookie->l2_len + cookie->l3_len;
376 hdr->csum_offset = offsetof(struct udp_hdr,
378 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
381 case PKT_TX_TCP_CKSUM:
382 hdr->csum_start = cookie->l2_len + cookie->l3_len;
383 hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
384 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
388 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
389 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
390 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
394 /* TCP Segmentation Offload */
395 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
396 virtio_tso_fix_cksum(cookie);
397 hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
398 VIRTIO_NET_HDR_GSO_TCPV6 :
399 VIRTIO_NET_HDR_GSO_TCPV4;
400 hdr->gso_size = cookie->tso_segsz;
406 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
407 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
408 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
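/*
 * In-order TX enqueue for packets whose virtio-net header fits in the mbuf
 * headroom: the header is prepended in place and each packet uses exactly
 * one descriptor.
 */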
414 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
415 struct rte_mbuf **cookies,
418 struct vq_desc_extra *dxp;
419 struct virtqueue *vq = txvq->vq;
420 struct vring_desc *start_dp;
421 struct virtio_net_hdr *hdr;
423 uint16_t head_size = vq->hw->vtnet_hdr_size;
427 idx = vq->vq_desc_head_idx;
428 start_dp = vq->vq_ring.desc;
430 offload = tx_offload_enabled(vq->hw);
433 idx = idx & (vq->vq_nentries - 1);
434 dxp = &vq->vq_descx[idx];
435 dxp->cookie = (void *)cookies[i];
438 hdr = (struct virtio_net_hdr *)
439 rte_pktmbuf_prepend(cookies[i], head_size);
440 cookies[i]->pkt_len -= head_size;
442 /* if offload is disabled, the header is not zeroed below, so do it now */
444 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
445 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
446 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
447 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
448 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
449 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
452 virtqueue_xmit_offload(hdr, cookies[i], offload);
454 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
455 start_dp[idx].len = cookies[i]->data_len;
456 start_dp[idx].flags = 0;
458 vq_update_avail_ring(vq, idx);
464 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
465 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
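/*
 * Generic TX enqueue. Depending on what was negotiated, the virtio-net
 * header is either pushed into the mbuf headroom (can_push), kept in the
 * reserved region and referenced through an indirect descriptor table
 * (use_indirect), or chained in front of the data as a separate
 * descriptor.
 */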
469 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
470 uint16_t needed, int use_indirect, int can_push,
473 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
474 struct vq_desc_extra *dxp;
475 struct virtqueue *vq = txvq->vq;
476 struct vring_desc *start_dp;
477 uint16_t seg_num = cookie->nb_segs;
478 uint16_t head_idx, idx;
479 uint16_t head_size = vq->hw->vtnet_hdr_size;
480 struct virtio_net_hdr *hdr;
483 offload = tx_offload_enabled(vq->hw);
485 head_idx = vq->vq_desc_head_idx;
487 dxp = &vq->vq_descx[idx];
488 dxp->cookie = (void *)cookie;
489 dxp->ndescs = needed;
491 start_dp = vq->vq_ring.desc;
494 /* prepend cannot fail, checked by caller */
495 hdr = (struct virtio_net_hdr *)
496 rte_pktmbuf_prepend(cookie, head_size);
497 /* rte_pktmbuf_prepend() counts the hdr size toward the pkt length,
498 * which is not wanted here. The subtraction below restores the correct pkt size.
500 cookie->pkt_len -= head_size;
502 /* if offload is disabled, the header is not zeroed below, so do it now */
504 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
505 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
506 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
507 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
508 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
509 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
511 } else if (use_indirect) {
512 /* setup tx ring slot to point to indirect
513 * descriptor list stored in reserved region.
515 * the first slot in indirect ring is already preset
516 * to point to the header in reserved region
518 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
519 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
520 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
521 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
522 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
524 /* the loop below will fill in the rest of the indirect descriptors */
525 start_dp = txr[idx].tx_indir;
528 /* setup first tx ring slot to point to header
529 * stored in reserved region.
531 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
532 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
533 start_dp[idx].len = vq->hw->vtnet_hdr_size;
534 start_dp[idx].flags = VRING_DESC_F_NEXT;
535 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
537 idx = start_dp[idx].next;
540 virtqueue_xmit_offload(hdr, cookie, offload);
543 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
544 start_dp[idx].len = cookie->data_len;
545 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
546 idx = start_dp[idx].next;
547 } while ((cookie = cookie->next) != NULL);
550 idx = vq->vq_ring.desc[head_idx].next;
552 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
554 vq->vq_desc_head_idx = idx;
555 vq_update_avail_ring(vq, head_idx);
558 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
559 vq->vq_desc_tail_idx = idx;
564 virtio_dev_cq_start(struct rte_eth_dev *dev)
566 struct virtio_hw *hw = dev->data->dev_private;
568 if (hw->cvq && hw->cvq->vq) {
569 rte_spinlock_init(&hw->cvq->lock);
570 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
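/*
 * First stage of RX queue setup: clamp nb_desc to the ring size, record
 * the queue index and mempool, and export the queue through dev->data.
 * The ring is populated later in virtio_dev_rx_queue_setup_finish().
 */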
575 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
578 unsigned int socket_id __rte_unused,
579 const struct rte_eth_rxconf *rx_conf __rte_unused,
580 struct rte_mempool *mp)
582 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
583 struct virtio_hw *hw = dev->data->dev_private;
584 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
585 struct virtnet_rx *rxvq;
587 PMD_INIT_FUNC_TRACE();
589 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
590 nb_desc = vq->vq_nentries;
591 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
594 rxvq->queue_id = queue_idx;
596 if (rxvq->mpool == NULL) {
597 rte_exit(EXIT_FAILURE,
598 "Cannot allocate mbufs for rx virtqueue");
601 dev->data->rx_queues[queue_idx] = rxvq;
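/*
 * Second stage of RX queue setup: fill the ring with receive buffers. The
 * vectorized, in-order and default paths each refill the ring in their own
 * way before the available index is published.
 */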
607 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
609 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
610 struct virtio_hw *hw = dev->data->dev_private;
611 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
612 struct virtnet_rx *rxvq = &vq->rxq;
617 PMD_INIT_FUNC_TRACE();
619 /* Allocate blank mbufs for each rx descriptor */
622 if (hw->use_simple_rx) {
623 for (desc_idx = 0; desc_idx < vq->vq_nentries;
625 vq->vq_ring.avail->ring[desc_idx] = desc_idx;
626 vq->vq_ring.desc[desc_idx].flags =
630 virtio_rxq_vec_setup(rxvq);
633 memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
634 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
636 vq->sw_ring[vq->vq_nentries + desc_idx] =
640 if (hw->use_simple_rx) {
641 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
642 virtio_rxq_rearm_vec(rxvq);
643 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
645 } else if (hw->use_inorder_rx) {
646 if ((!virtqueue_full(vq))) {
647 uint16_t free_cnt = vq->vq_free_cnt;
648 struct rte_mbuf *pkts[free_cnt];
650 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
652 error = virtqueue_enqueue_refill_inorder(vq,
655 if (unlikely(error)) {
656 for (i = 0; i < free_cnt; i++)
657 rte_pktmbuf_free(pkts[i]);
662 vq_update_avail_idx(vq);
665 while (!virtqueue_full(vq)) {
666 m = rte_mbuf_raw_alloc(rxvq->mpool);
670 /* Enqueue allocated buffers */
671 error = virtqueue_enqueue_recv_refill(vq, m);
679 vq_update_avail_idx(vq);
682 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
690 * struct rte_eth_dev *dev: Used to update dev->data->tx_queues
691 * uint16_t nb_desc: Defaults to the value read from config space when zero
692 * unsigned int socket_id: Unused here
693 * const struct rte_eth_txconf *tx_conf: Provides the tx free threshold
694 * uint16_t queue_idx: Index into the device's tx queue list
697 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
700 unsigned int socket_id __rte_unused,
701 const struct rte_eth_txconf *tx_conf)
703 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
704 struct virtio_hw *hw = dev->data->dev_private;
705 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
706 struct virtnet_tx *txvq;
707 uint16_t tx_free_thresh;
709 PMD_INIT_FUNC_TRACE();
711 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
712 nb_desc = vq->vq_nentries;
713 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
716 txvq->queue_id = queue_idx;
718 tx_free_thresh = tx_conf->tx_free_thresh;
719 if (tx_free_thresh == 0)
721 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
723 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
724 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
725 "number of TX entries minus 3 (%u)."
726 " (tx_free_thresh=%u port=%u queue=%u)\n",
728 tx_free_thresh, dev->data->port_id, queue_idx);
732 vq->vq_free_thresh = tx_free_thresh;
734 dev->data->tx_queues[queue_idx] = txvq;
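/*
 * Second stage of TX queue setup. For in-order TX the last descriptor's
 * next field is pointed back at slot 0 so the ring forms one continuous
 * chain.
 */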
739 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
742 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
743 struct virtio_hw *hw = dev->data->dev_private;
744 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
746 PMD_INIT_FUNC_TRACE();
748 if (hw->use_inorder_tx)
749 vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
757 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
761 * Requeue the discarded mbuf. This should always be
762 * successful since it was just dequeued.
764 error = virtqueue_enqueue_recv_refill(vq, m);
766 if (unlikely(error)) {
767 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
773 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
777 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
778 if (unlikely(error)) {
779 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
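/*
 * Update the per-queue size histogram and multicast/broadcast counters for
 * one packet. The bin index for mid-sized packets is derived from the
 * position of the highest set bit of the length, e.g. a 128-byte packet
 * has its top bit at position 7, so bin = 32 - clz(128) - 5 = 3.
 */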
785 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
787 uint32_t s = mbuf->pkt_len;
788 struct ether_addr *ea;
791 stats->size_bins[1]++;
792 } else if (s > 64 && s < 1024) {
795 /* count leading zeros to offset into the correct size bin */
796 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
797 stats->size_bins[bin]++;
800 stats->size_bins[0]++;
802 stats->size_bins[6]++;
804 stats->size_bins[7]++;
807 ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
808 if (is_multicast_ether_addr(ea)) {
809 if (is_broadcast_ether_addr(ea))
817 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
819 VIRTIO_DUMP_PACKET(m, m->data_len);
821 rxvq->stats.bytes += m->pkt_len;
822 virtio_update_packet_stats(&rxvq->stats, m);
825 /* Optionally fill offload information (checksum, LRO) into the mbuf */
827 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
829 struct rte_net_hdr_lens hdr_lens;
830 uint32_t hdrlen, ptype;
831 int l4_supported = 0;
834 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
837 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
839 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
840 m->packet_type = ptype;
841 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
842 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
843 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
846 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
847 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
848 if (hdr->csum_start <= hdrlen && l4_supported) {
849 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
851 /* Unknown proto or tunnel, do sw cksum. We can assume
852 * the cksum field is in the first segment since the
853 * buffers we provided to the host are large enough.
854 * In case of SCTP, this will be wrong since it's a CRC
855 * but there's nothing we can do.
857 uint16_t csum = 0, off;
859 rte_raw_cksum_mbuf(m, hdr->csum_start,
860 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
862 if (likely(csum != 0xffff))
864 off = hdr->csum_offset + hdr->csum_start;
865 if (rte_pktmbuf_data_len(m) >= off + 1)
866 *rte_pktmbuf_mtod_offset(m, uint16_t *,
869 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
870 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
873 /* GSO request, save required information in mbuf */
874 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
875 /* Check unsupported modes */
876 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
877 (hdr->gso_size == 0)) {
881 /* Update mss length in mbuf */
882 m->tso_segsz = hdr->gso_size;
883 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
884 case VIRTIO_NET_HDR_GSO_TCPV4:
885 case VIRTIO_NET_HDR_GSO_TCPV6:
886 m->ol_flags |= PKT_RX_LRO | \
887 PKT_RX_L4_CKSUM_NONE;
898 rx_offload_enabled(struct virtio_hw *hw)
900 return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
901 vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
902 vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
905 #define VIRTIO_MBUF_BURST_SZ 64
906 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
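/*
 * Default receive burst function (no mergeable buffers): each packet
 * occupies exactly one used entry. Dequeue up to nb_pkts buffers, strip
 * the virtio-net header, apply RX offload flags, then refill the ring with
 * fresh mbufs and kick the host if it requested notifications.
 */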
908 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
910 struct virtnet_rx *rxvq = rx_queue;
911 struct virtqueue *vq = rxvq->vq;
912 struct virtio_hw *hw = vq->hw;
913 struct rte_mbuf *rxm, *new_mbuf;
914 uint16_t nb_used, num, nb_rx;
915 uint32_t len[VIRTIO_MBUF_BURST_SZ];
916 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
918 uint32_t i, nb_enqueued;
921 struct virtio_net_hdr *hdr;
924 if (unlikely(hw->started == 0))
927 nb_used = VIRTQUEUE_NUSED(vq);
931 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
932 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
933 num = VIRTIO_MBUF_BURST_SZ;
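/* Trim the batch so the next consume index is a multiple of
 * DESC_PER_CACHELINE (4 with a 64-byte cache line), keeping ring
 * accesses cache-line aligned across bursts.
 */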
934 if (likely(num > DESC_PER_CACHELINE))
935 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
937 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
938 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
941 hdr_size = hw->vtnet_hdr_size;
942 offload = rx_offload_enabled(hw);
944 for (i = 0; i < num ; i++) {
947 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
949 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
950 PMD_RX_LOG(ERR, "Packet drop");
952 virtio_discard_rxbuf(vq, rxm);
953 rxvq->stats.errors++;
957 rxm->port = rxvq->port_id;
958 rxm->data_off = RTE_PKTMBUF_HEADROOM;
962 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
963 rxm->data_len = (uint16_t)(len[i] - hdr_size);
965 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
966 RTE_PKTMBUF_HEADROOM - hdr_size);
971 if (offload && virtio_rx_offload(rxm, hdr) < 0) {
972 virtio_discard_rxbuf(vq, rxm);
973 rxvq->stats.errors++;
977 virtio_rx_stats_updated(rxvq, rxm);
979 rx_pkts[nb_rx++] = rxm;
982 rxvq->stats.packets += nb_rx;
984 /* Allocate new mbuf for the used descriptor */
986 while (likely(!virtqueue_full(vq))) {
987 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
988 if (unlikely(new_mbuf == NULL)) {
989 struct rte_eth_dev *dev
990 = &rte_eth_devices[rxvq->port_id];
991 dev->data->rx_mbuf_alloc_failed++;
994 error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
995 if (unlikely(error)) {
996 rte_pktmbuf_free(new_mbuf);
1002 if (likely(nb_enqueued)) {
1003 vq_update_avail_idx(vq);
1005 if (unlikely(virtqueue_kick_prepare(vq))) {
1006 virtqueue_notify(vq);
1007 PMD_RX_LOG(DEBUG, "Notified");
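/*
 * In-order receive burst with mergeable RX buffers: the first buffer of
 * each packet carries num_buffers in its header, and the following used
 * entries are chained on as extra mbuf segments until the packet is
 * complete.
 */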
1015 virtio_recv_mergeable_pkts_inorder(void *rx_queue,
1016 struct rte_mbuf **rx_pkts,
1019 struct virtnet_rx *rxvq = rx_queue;
1020 struct virtqueue *vq = rxvq->vq;
1021 struct virtio_hw *hw = vq->hw;
1022 struct rte_mbuf *rxm;
1023 struct rte_mbuf *prev;
1024 uint16_t nb_used, num, nb_rx;
1025 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1026 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1028 uint32_t nb_enqueued;
1036 if (unlikely(hw->started == 0))
1039 nb_used = VIRTQUEUE_NUSED(vq);
1040 nb_used = RTE_MIN(nb_used, nb_pkts);
1041 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1045 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1050 hdr_size = hw->vtnet_hdr_size;
1051 offload = rx_offload_enabled(hw);
1053 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1055 for (i = 0; i < num; i++) {
1056 struct virtio_net_hdr_mrg_rxbuf *header;
1058 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1059 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1063 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1064 PMD_RX_LOG(ERR, "Packet drop");
1066 virtio_discard_rxbuf_inorder(vq, rxm);
1067 rxvq->stats.errors++;
1071 header = (struct virtio_net_hdr_mrg_rxbuf *)
1072 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1074 seg_num = header->num_buffers;
1079 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1080 rxm->nb_segs = seg_num;
1083 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1084 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1086 rxm->port = rxvq->port_id;
1088 rx_pkts[nb_rx] = rxm;
1091 if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
1092 virtio_discard_rxbuf_inorder(vq, rxm);
1093 rxvq->stats.errors++;
1098 rte_vlan_strip(rx_pkts[nb_rx]);
1100 seg_res = seg_num - 1;
1102 /* Merge remaining segments */
1103 while (seg_res != 0 && i < (num - 1)) {
1107 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1108 rxm->pkt_len = (uint32_t)(len[i]);
1109 rxm->data_len = (uint16_t)(len[i]);
1111 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1112 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1122 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1127 /* Last packet may still need more segments merged */
1128 while (seg_res != 0) {
1129 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1130 VIRTIO_MBUF_BURST_SZ);
1132 prev = rcv_pkts[nb_rx];
1133 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1134 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1136 uint16_t extra_idx = 0;
1139 while (extra_idx < rcv_cnt) {
1140 rxm = rcv_pkts[extra_idx];
1142 RTE_PKTMBUF_HEADROOM - hdr_size;
1143 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1144 rxm->data_len = (uint16_t)(len[extra_idx]);
1147 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1148 rx_pkts[nb_rx]->data_len += len[extra_idx];
1154 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1159 "No enough segments for packet.");
1160 virtio_discard_rxbuf_inorder(vq, prev);
1161 rxvq->stats.errors++;
1166 rxvq->stats.packets += nb_rx;
1168 /* Allocate new mbuf for the used descriptor */
1170 if (likely(!virtqueue_full(vq))) {
1171 /* free_cnt may include mrg descs */
1172 uint16_t free_cnt = vq->vq_free_cnt;
1173 struct rte_mbuf *new_pkts[free_cnt];
1175 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1176 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1178 if (unlikely(error)) {
1179 for (i = 0; i < free_cnt; i++)
1180 rte_pktmbuf_free(new_pkts[i]);
1182 nb_enqueued += free_cnt;
1184 struct rte_eth_dev *dev =
1185 &rte_eth_devices[rxvq->port_id];
1186 dev->data->rx_mbuf_alloc_failed += free_cnt;
1190 if (likely(nb_enqueued)) {
1191 vq_update_avail_idx(vq);
1193 if (unlikely(virtqueue_kick_prepare(vq))) {
1194 virtqueue_notify(vq);
1195 PMD_RX_LOG(DEBUG, "Notified");
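/*
 * Mergeable-buffer receive burst for the default (out-of-order) path:
 * buffers are dequeued one at a time and extra segments are fetched as
 * needed to complete each packet before it is handed to the application.
 */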
1203 virtio_recv_mergeable_pkts(void *rx_queue,
1204 struct rte_mbuf **rx_pkts,
1207 struct virtnet_rx *rxvq = rx_queue;
1208 struct virtqueue *vq = rxvq->vq;
1209 struct virtio_hw *hw = vq->hw;
1210 struct rte_mbuf *rxm, *new_mbuf;
1211 uint16_t nb_used, num, nb_rx;
1212 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1213 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1214 struct rte_mbuf *prev;
1216 uint32_t i, nb_enqueued;
1224 if (unlikely(hw->started == 0))
1227 nb_used = VIRTQUEUE_NUSED(vq);
1231 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1238 hdr_size = hw->vtnet_hdr_size;
1239 offload = rx_offload_enabled(hw);
1241 while (i < nb_used) {
1242 struct virtio_net_hdr_mrg_rxbuf *header;
1244 if (nb_rx == nb_pkts)
1247 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
1253 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1254 PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
1258 if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
1259 PMD_RX_LOG(ERR, "Packet drop");
1261 virtio_discard_rxbuf(vq, rxm);
1262 rxvq->stats.errors++;
1266 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
1267 RTE_PKTMBUF_HEADROOM - hdr_size);
1268 seg_num = header->num_buffers;
1273 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1274 rxm->nb_segs = seg_num;
1277 rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
1278 rxm->data_len = (uint16_t)(len[0] - hdr_size);
1280 rxm->port = rxvq->port_id;
1281 rx_pkts[nb_rx] = rxm;
1284 if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
1285 virtio_discard_rxbuf(vq, rxm);
1286 rxvq->stats.errors++;
1290 seg_res = seg_num - 1;
1292 while (seg_res != 0) {
1294 * Get extra segments for current uncompleted packet.
1297 RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
1298 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1300 virtqueue_dequeue_burst_rx(vq,
1301 rcv_pkts, len, rcv_cnt);
1306 "No enough segments for packet.");
1308 virtio_discard_rxbuf(vq, rxm);
1309 rxvq->stats.errors++;
1315 while (extra_idx < rcv_cnt) {
1316 rxm = rcv_pkts[extra_idx];
1318 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1319 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1320 rxm->data_len = (uint16_t)(len[extra_idx]);
1326 rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
1333 rte_vlan_strip(rx_pkts[nb_rx]);
1335 VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
1336 rx_pkts[nb_rx]->data_len);
1338 rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
1339 virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
1343 rxvq->stats.packets += nb_rx;
1345 /* Allocate new mbuf for the used descriptor */
1347 while (likely(!virtqueue_full(vq))) {
1348 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1349 if (unlikely(new_mbuf == NULL)) {
1350 struct rte_eth_dev *dev
1351 = &rte_eth_devices[rxvq->port_id];
1352 dev->data->rx_mbuf_alloc_failed++;
1355 error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
1356 if (unlikely(error)) {
1357 rte_pktmbuf_free(new_mbuf);
1363 if (likely(nb_enqueued)) {
1364 vq_update_avail_idx(vq);
1366 if (unlikely(virtqueue_kick_prepare(vq))) {
1367 virtqueue_notify(vq);
1368 PMD_RX_LOG(DEBUG, "Notified");
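/*
 * Default transmit burst function: reclaim completed descriptors when the
 * ring runs low, choose the cheapest descriptor layout per packet (pushed
 * header, indirect table or chained descriptors) and kick the host once
 * per burst.
 */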
1376 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1378 struct virtnet_tx *txvq = tx_queue;
1379 struct virtqueue *vq = txvq->vq;
1380 struct virtio_hw *hw = vq->hw;
1381 uint16_t hdr_size = hw->vtnet_hdr_size;
1382 uint16_t nb_used, nb_tx = 0;
1385 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1388 if (unlikely(nb_pkts < 1))
1391 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1392 nb_used = VIRTQUEUE_NUSED(vq);
1395 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1396 virtio_xmit_cleanup(vq, nb_used);
1398 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1399 struct rte_mbuf *txm = tx_pkts[nb_tx];
1400 int can_push = 0, use_indirect = 0, slots, need;
1402 /* Do VLAN tag insertion */
1403 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1404 error = rte_vlan_insert(&txm);
1405 if (unlikely(error)) {
1406 rte_pktmbuf_free(txm);
1411 /* optimize ring usage */
1412 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1413 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1414 rte_mbuf_refcnt_read(txm) == 1 &&
1415 RTE_MBUF_DIRECT(txm) &&
1416 txm->nb_segs == 1 &&
1417 rte_pktmbuf_headroom(txm) >= hdr_size &&
1418 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1419 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1421 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1422 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1425 /* How many main ring entries are needed for this Tx?
1426 * any_layout => number of segments
1428 * default => number of segments + 1
1430 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1431 need = slots - vq->vq_free_cnt;
1433 /* A positive value indicates that free vring descriptors are needed */
1434 if (unlikely(need > 0)) {
1435 nb_used = VIRTQUEUE_NUSED(vq);
1437 need = RTE_MIN(need, (int)nb_used);
1439 virtio_xmit_cleanup(vq, need);
1440 need = slots - vq->vq_free_cnt;
1441 if (unlikely(need > 0)) {
1443 "No free tx descriptors to transmit");
1448 /* Enqueue Packet buffers */
1449 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1452 txvq->stats.bytes += txm->pkt_len;
1453 virtio_update_packet_stats(&txvq->stats, txm);
1456 txvq->stats.packets += nb_tx;
1458 if (likely(nb_tx)) {
1459 vq_update_avail_idx(vq);
1461 if (unlikely(virtqueue_kick_prepare(vq))) {
1462 virtqueue_notify(vq);
1463 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
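/*
 * In-order transmit burst: single-segment packets that can absorb the
 * virtio-net header in their headroom are batched through the one-slot
 * in-order enqueue; other packets fall back to the chained layout.
 */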
1471 virtio_xmit_pkts_inorder(void *tx_queue,
1472 struct rte_mbuf **tx_pkts,
1475 struct virtnet_tx *txvq = tx_queue;
1476 struct virtqueue *vq = txvq->vq;
1477 struct virtio_hw *hw = vq->hw;
1478 uint16_t hdr_size = hw->vtnet_hdr_size;
1479 uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
1480 struct rte_mbuf *inorder_pkts[nb_pkts];
1483 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1486 if (unlikely(nb_pkts < 1))
1490 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1491 nb_used = VIRTQUEUE_NUSED(vq);
1494 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1495 virtio_xmit_cleanup_inorder(vq, nb_used);
1497 if (unlikely(!vq->vq_free_cnt))
1498 virtio_xmit_cleanup_inorder(vq, nb_used);
1500 nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
1502 for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
1503 struct rte_mbuf *txm = tx_pkts[nb_tx];
1506 /* Do VLAN tag insertion */
1507 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1508 error = rte_vlan_insert(&txm);
1509 if (unlikely(error)) {
1510 rte_pktmbuf_free(txm);
1515 /* optimize ring usage */
1516 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1517 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1518 rte_mbuf_refcnt_read(txm) == 1 &&
1519 RTE_MBUF_DIRECT(txm) &&
1520 txm->nb_segs == 1 &&
1521 rte_pktmbuf_headroom(txm) >= hdr_size &&
1522 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1523 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1524 inorder_pkts[nb_inorder_pkts] = txm;
1527 txvq->stats.bytes += txm->pkt_len;
1528 virtio_update_packet_stats(&txvq->stats, txm);
1532 if (nb_inorder_pkts) {
1533 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1535 nb_inorder_pkts = 0;
1538 slots = txm->nb_segs + 1;
1539 need = slots - vq->vq_free_cnt;
1540 if (unlikely(need > 0)) {
1541 nb_used = VIRTQUEUE_NUSED(vq);
1543 need = RTE_MIN(need, (int)nb_used);
1545 virtio_xmit_cleanup_inorder(vq, need);
1547 need = slots - vq->vq_free_cnt;
1549 if (unlikely(need > 0)) {
1551 "No free tx descriptors to transmit");
1555 /* Enqueue Packet buffers */
1556 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
1558 txvq->stats.bytes += txm->pkt_len;
1559 virtio_update_packet_stats(&txvq->stats, txm);
1562 /* Transmit all inorder packets */
1563 if (nb_inorder_pkts)
1564 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1567 txvq->stats.packets += nb_tx;
1569 if (likely(nb_tx)) {
1570 vq_update_avail_idx(vq);
1572 if (unlikely(virtqueue_kick_prepare(vq))) {
1573 virtqueue_notify(vq);
1574 PMD_TX_LOG(DEBUG, "Notified backend after xmit");