1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
45 struct virtnet_rx *rxvq = rxq;
46 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
48 return virtqueue_nused(vq) >= offset;
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
54 vq->vq_free_cnt += num;
55 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
61 struct vring_desc *dp, *dp_tail;
62 struct vq_desc_extra *dxp;
63 uint16_t desc_idx_last = desc_idx;
65 dp = &vq->vq_split.ring.desc[desc_idx];
66 dxp = &vq->vq_descx[desc_idx];
67 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 while (dp->flags & VRING_DESC_F_NEXT) {
70 desc_idx_last = dp->next;
71 dp = &vq->vq_split.ring.desc[dp->next];
77 * We must append the existing free chain, if any, to the end of
78 * the newly freed chain. If the virtqueue was completely used, then
79 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
81 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 vq->vq_desc_head_idx = desc_idx;
84 dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
85 dp_tail->next = desc_idx;
88 vq->vq_desc_tail_idx = desc_idx_last;
89 dp->next = VQ_RING_DESC_CHAIN_END;
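/*
 * Illustration (a sketch with made-up indices): assume a direct chain of
 * descriptors 3 -> 4 -> 5 was just consumed and the current free-list tail
 * is descriptor 7.  The walk above leaves desc_idx_last == 5, then
 * desc[7].next is pointed at 3, vq_desc_tail_idx becomes 5 and desc[5].next
 * is terminated with VQ_RING_DESC_CHAIN_END.  If the ring had been
 * completely used (tail == VQ_RING_DESC_CHAIN_END), the freed head simply
 * becomes the new vq_desc_head_idx instead.
 */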
93 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
95 uint32_t s = mbuf->pkt_len;
96 struct rte_ether_addr *ea;
101 stats->size_bins[1]++;
102 } else if (s > 64 && s < 1024) {
105 /* count leading zeros, and offset into the correct bin */
106 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
107 stats->size_bins[bin]++;
110 stats->size_bins[0]++;
112 stats->size_bins[6]++;
114 stats->size_bins[7]++;
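/*
 * Worked example for the histogram above: for 64 < s < 1024 the expression
 *   bin = (sizeof(s) * 8) - __builtin_clz(s) - 5
 * picks the power-of-two bucket containing s, e.g. s = 300 has
 * __builtin_clz(300) == 23 for a 32-bit value, so bin = 32 - 23 - 5 = 4
 * (the 256..511 byte bucket).  Exactly 64-byte packets land in bin 1,
 * shorter ones in bin 0, and sizes of 1024 and above are split between
 * bins 6 and 7 by the remaining branches.
 */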
117 ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
118 if (rte_is_multicast_ether_addr(ea)) {
119 if (rte_is_broadcast_ether_addr(ea))
127 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
129 VIRTIO_DUMP_PACKET(m, m->data_len);
131 virtio_update_packet_stats(&rxvq->stats, m);
135 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
136 struct rte_mbuf **rx_pkts,
140 struct rte_mbuf *cookie;
143 struct vring_packed_desc *desc;
146 desc = vq->vq_packed.ring.desc;
148 for (i = 0; i < num; i++) {
149 used_idx = vq->vq_used_cons_idx;
150 /* desc_is_used has a load-acquire or rte_io_rmb inside
151 * and waits for a used desc in the virtqueue.
153 if (!desc_is_used(&desc[used_idx], vq))
155 len[i] = desc[used_idx].len;
156 id = desc[used_idx].id;
157 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
158 if (unlikely(cookie == NULL)) {
159 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
160 vq->vq_used_cons_idx);
163 rte_prefetch0(cookie);
164 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
168 vq->vq_used_cons_idx++;
169 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
170 vq->vq_used_cons_idx -= vq->vq_nentries;
171 vq->vq_packed.used_wrap_counter ^= 1;
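/*
 * Note on the wrap counter (packed ring semantics): there is no separate
 * used ring, so the driver keeps a single bit, used_wrap_counter, that
 * flips each time vq_used_cons_idx wraps past vq_nentries, as done just
 * above.  desc_is_used() compares the AVAIL/USED bits in the descriptor
 * flags against this counter to tell fresh completions from descriptors
 * left over from the previous lap around the ring.
 */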
179 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
180 uint32_t *len, uint16_t num)
182 struct vring_used_elem *uep;
183 struct rte_mbuf *cookie;
184 uint16_t used_idx, desc_idx;
187 /* Caller does the check */
188 for (i = 0; i < num ; i++) {
189 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
190 uep = &vq->vq_split.ring.used->ring[used_idx];
191 desc_idx = (uint16_t) uep->id;
193 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
195 if (unlikely(cookie == NULL)) {
196 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
197 vq->vq_used_cons_idx);
201 rte_prefetch0(cookie);
202 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
204 vq->vq_used_cons_idx++;
205 vq_ring_free_chain(vq, desc_idx);
206 vq->vq_descx[desc_idx].cookie = NULL;
213 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
214 struct rte_mbuf **rx_pkts,
218 struct vring_used_elem *uep;
219 struct rte_mbuf *cookie;
220 uint16_t used_idx = 0;
223 if (unlikely(num == 0))
226 for (i = 0; i < num; i++) {
227 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
228 /* Desc idx same as used idx */
229 uep = &vq->vq_split.ring.used->ring[used_idx];
231 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
233 if (unlikely(cookie == NULL)) {
234 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
235 vq->vq_used_cons_idx);
239 rte_prefetch0(cookie);
240 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
242 vq->vq_used_cons_idx++;
243 vq->vq_descx[used_idx].cookie = NULL;
246 vq_ring_free_inorder(vq, used_idx, i);
251 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
252 struct rte_mbuf **cookies,
255 struct vq_desc_extra *dxp;
256 struct virtio_hw *hw = vq->hw;
257 struct vring_desc *start_dp;
258 uint16_t head_idx, idx, i = 0;
260 if (unlikely(vq->vq_free_cnt == 0))
262 if (unlikely(vq->vq_free_cnt < num))
265 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
266 start_dp = vq->vq_split.ring.desc;
269 idx = head_idx & (vq->vq_nentries - 1);
270 dxp = &vq->vq_descx[idx];
271 dxp->cookie = (void *)cookies[i];
274 start_dp[idx].addr = cookies[i]->buf_iova +
275 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
276 start_dp[idx].len = cookies[i]->buf_len -
277 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
278 start_dp[idx].flags = VRING_DESC_F_WRITE;
280 vq_update_avail_ring(vq, idx);
285 vq->vq_desc_head_idx += num;
286 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
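/*
 * Buffer layout sketch for the refill above (illustrative numbers): with
 * RTE_PKTMBUF_HEADROOM == 128 and a 12-byte virtio net header, the
 * descriptor address points 128 - 12 = 116 bytes into the mbuf data room
 * and its length is buf_len - 116.  The device thus writes the virtio net
 * header into the tail of the headroom and the frame right after it, so
 * the packet data still begins at the usual headroom offset.
 */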
291 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
294 struct vq_desc_extra *dxp;
295 struct virtio_hw *hw = vq->hw;
296 struct vring_desc *start_dp = vq->vq_split.ring.desc;
299 if (unlikely(vq->vq_free_cnt == 0))
301 if (unlikely(vq->vq_free_cnt < num))
304 if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
307 for (i = 0; i < num; i++) {
308 idx = vq->vq_desc_head_idx;
309 dxp = &vq->vq_descx[idx];
310 dxp->cookie = (void *)cookie[i];
313 start_dp[idx].addr = cookie[i]->buf_iova +
314 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
315 start_dp[idx].len = cookie[i]->buf_len -
316 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
317 start_dp[idx].flags = VRING_DESC_F_WRITE;
318 vq->vq_desc_head_idx = start_dp[idx].next;
319 vq_update_avail_ring(vq, idx);
320 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
321 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
326 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
332 virtqueue_refill_single_packed(struct virtqueue *vq,
333 struct vring_packed_desc *dp,
334 struct rte_mbuf *cookie)
336 uint16_t flags = vq->vq_packed.cached_flags;
337 struct virtio_hw *hw = vq->hw;
339 dp->addr = cookie->buf_iova +
340 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
341 dp->len = cookie->buf_len -
342 RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
344 virtqueue_store_flags_packed(dp, flags,
347 if (++vq->vq_avail_idx >= vq->vq_nentries) {
348 vq->vq_avail_idx -= vq->vq_nentries;
349 vq->vq_packed.cached_flags ^=
350 VRING_PACKED_DESC_F_AVAIL_USED;
351 flags = vq->vq_packed.cached_flags;
356 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
357 struct rte_mbuf **cookie, uint16_t num)
359 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
360 struct vq_desc_extra *dxp;
364 if (unlikely(vq->vq_free_cnt == 0))
366 if (unlikely(vq->vq_free_cnt < num))
369 for (i = 0; i < num; i++) {
370 idx = vq->vq_avail_idx;
371 dxp = &vq->vq_descx[idx];
372 dxp->cookie = (void *)cookie[i];
375 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
377 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
382 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
383 struct rte_mbuf **cookie, uint16_t num)
385 struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
386 struct vq_desc_extra *dxp;
390 if (unlikely(vq->vq_free_cnt == 0))
392 if (unlikely(vq->vq_free_cnt < num))
395 for (i = 0; i < num; i++) {
396 idx = vq->vq_avail_idx;
397 did = start_dp[idx].id;
398 dxp = &vq->vq_descx[did];
399 dxp->cookie = (void *)cookie[i];
402 virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
404 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
408 /* When doing TSO, the IP length is not included in the pseudo header
409 * checksum of the packet given to the PMD, but for virtio it is
413 virtio_tso_fix_cksum(struct rte_mbuf *m)
415 /* common case: header is not fragmented */
416 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
418 struct rte_ipv4_hdr *iph;
419 struct rte_ipv6_hdr *ip6h;
420 struct rte_tcp_hdr *th;
421 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
424 iph = rte_pktmbuf_mtod_offset(m,
425 struct rte_ipv4_hdr *, m->l2_len);
426 th = RTE_PTR_ADD(iph, m->l3_len);
427 if ((iph->version_ihl >> 4) == 4) {
428 iph->hdr_checksum = 0;
429 iph->hdr_checksum = rte_ipv4_cksum(iph);
430 ip_len = iph->total_length;
431 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
434 ip6h = (struct rte_ipv6_hdr *)iph;
435 ip_paylen = ip6h->payload_len;
438 /* calculate the new phdr checksum not including ip_paylen */
439 prev_cksum = th->cksum;
442 tmp = (tmp & 0xffff) + (tmp >> 16);
445 /* replace it in the packet */
446 th->cksum = new_cksum;
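/*
 * Worked example of the one's-complement update above: if the stack left
 * th->cksum == 0xff82 (pseudo header checksum without the length) and
 * ip_paylen is 0x0100, then tmp = 0xff82 + 0x0100 = 0x10082 and folding
 * the carry gives (0x0082 + 0x1) = 0x0083, which is written back as the
 * new th->cksum.  Adding ip_paylen back in this way restores the length
 * term that virtio expects in the pseudo header checksum of TSO packets.
 */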
454 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
455 struct rte_mbuf **cookies,
458 struct vq_desc_extra *dxp;
459 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
460 struct vring_desc *start_dp;
461 struct virtio_net_hdr *hdr;
463 int16_t head_size = vq->hw->vtnet_hdr_size;
466 idx = vq->vq_desc_head_idx;
467 start_dp = vq->vq_split.ring.desc;
470 idx = idx & (vq->vq_nentries - 1);
471 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
472 dxp->cookie = (void *)cookies[i];
474 virtio_update_packet_stats(&txvq->stats, cookies[i]);
476 hdr = rte_pktmbuf_mtod_offset(cookies[i],
477 struct virtio_net_hdr *, -head_size);
479 /* if offload disabled, hdr is not zeroed yet, do it now */
480 if (!vq->hw->has_tx_offload)
481 virtqueue_clear_net_hdr(hdr);
483 virtqueue_xmit_offload(hdr, cookies[i]);
485 start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
486 start_dp[idx].len = cookies[i]->data_len + head_size;
487 start_dp[idx].flags = 0;
490 vq_update_avail_ring(vq, idx);
496 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
497 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
501 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
502 struct rte_mbuf *cookie,
505 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
506 struct vring_packed_desc *dp;
507 struct vq_desc_extra *dxp;
508 uint16_t idx, id, flags;
509 int16_t head_size = vq->hw->vtnet_hdr_size;
510 struct virtio_net_hdr *hdr;
512 id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
513 idx = vq->vq_avail_idx;
514 dp = &vq->vq_packed.ring.desc[idx];
516 dxp = &vq->vq_descx[id];
518 dxp->cookie = cookie;
520 flags = vq->vq_packed.cached_flags;
522 /* prepend cannot fail, checked by caller */
523 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
526 /* if offload disabled, hdr is not zeroed yet, do it now */
527 if (!vq->hw->has_tx_offload)
528 virtqueue_clear_net_hdr(hdr);
530 virtqueue_xmit_offload(hdr, cookie);
532 dp->addr = rte_mbuf_data_iova(cookie) - head_size;
533 dp->len = cookie->data_len + head_size;
536 if (++vq->vq_avail_idx >= vq->vq_nentries) {
537 vq->vq_avail_idx -= vq->vq_nentries;
538 vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
544 vq->vq_desc_head_idx = dxp->next;
545 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
546 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
549 virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
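/*
 * Note: virtqueue_store_flags_packed() is deliberately the last store.
 * The descriptor's addr/len/id written above must be visible to the
 * device before the AVAIL flag is exposed; the helper is expected to use
 * a release store (or a write barrier when weak_barriers is not set) to
 * enforce that ordering.
 */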
553 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
554 uint16_t needed, int use_indirect, int can_push,
557 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
558 struct vq_desc_extra *dxp;
559 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
560 struct vring_desc *start_dp;
561 uint16_t seg_num = cookie->nb_segs;
562 uint16_t head_idx, idx;
563 int16_t head_size = vq->hw->vtnet_hdr_size;
564 bool prepend_header = false;
565 struct virtio_net_hdr *hdr;
567 head_idx = vq->vq_desc_head_idx;
570 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
572 dxp = &vq->vq_descx[idx];
573 dxp->cookie = (void *)cookie;
574 dxp->ndescs = needed;
576 start_dp = vq->vq_split.ring.desc;
579 /* prepend cannot fail, checked by caller */
580 hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
582 prepend_header = true;
584 /* if offload disabled, it is not zeroed below, do it now */
585 if (!vq->hw->has_tx_offload)
586 virtqueue_clear_net_hdr(hdr);
587 } else if (use_indirect) {
588 /* setup tx ring slot to point to indirect
589 * descriptor list stored in reserved region.
591 * the first slot in indirect ring is already preset
592 * to point to the header in reserved region
594 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
595 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
596 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
597 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
598 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
600 /* loop below will fill in rest of the indirect elements */
601 start_dp = txr[idx].tx_indir;
604 /* setup first tx ring slot to point to header
605 * stored in reserved region.
607 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
608 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
609 start_dp[idx].len = vq->hw->vtnet_hdr_size;
610 start_dp[idx].flags = VRING_DESC_F_NEXT;
611 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
613 idx = start_dp[idx].next;
616 if (vq->hw->has_tx_offload)
617 virtqueue_xmit_offload(hdr, cookie);
620 start_dp[idx].addr = rte_mbuf_data_iova(cookie);
621 start_dp[idx].len = cookie->data_len;
622 if (prepend_header) {
623 start_dp[idx].addr -= head_size;
624 start_dp[idx].len += head_size;
625 prepend_header = false;
627 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
628 idx = start_dp[idx].next;
629 } while ((cookie = cookie->next) != NULL);
632 idx = vq->vq_split.ring.desc[head_idx].next;
634 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
636 vq->vq_desc_head_idx = idx;
637 vq_update_avail_ring(vq, head_idx);
640 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
641 vq->vq_desc_tail_idx = idx;
646 virtio_dev_cq_start(struct rte_eth_dev *dev)
648 struct virtio_hw *hw = dev->data->dev_private;
651 rte_spinlock_init(&hw->cvq->lock);
652 VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
657 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
660 unsigned int socket_id __rte_unused,
661 const struct rte_eth_rxconf *rx_conf,
662 struct rte_mempool *mp)
664 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
665 struct virtio_hw *hw = dev->data->dev_private;
666 struct virtqueue *vq = hw->vqs[vq_idx];
667 struct virtnet_rx *rxvq;
668 uint16_t rx_free_thresh;
672 PMD_INIT_FUNC_TRACE();
674 if (rx_conf->rx_deferred_start) {
675 PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
679 buf_size = virtio_rx_mem_pool_buf_size(mp);
680 if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
681 hw->rx_ol_scatter, &error)) {
682 PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
687 rx_free_thresh = rx_conf->rx_free_thresh;
688 if (rx_free_thresh == 0)
690 RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
692 if (rx_free_thresh & 0x3) {
693 RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
694 " (rx_free_thresh=%u port=%u queue=%u)\n",
695 rx_free_thresh, dev->data->port_id, queue_idx);
699 if (rx_free_thresh >= vq->vq_nentries) {
700 RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
701 "number of RX entries (%u)."
702 " (rx_free_thresh=%u port=%u queue=%u)\n",
704 rx_free_thresh, dev->data->port_id, queue_idx);
707 vq->vq_free_thresh = rx_free_thresh;
710 * For the split ring vectorized path, the number of descriptors must be
711 * equal to the ring size.
713 if (nb_desc > vq->vq_nentries ||
714 (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
715 nb_desc = vq->vq_nentries;
717 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
720 rxvq->queue_id = queue_idx;
722 dev->data->rx_queues[queue_idx] = rxvq;
728 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
730 uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
731 struct virtio_hw *hw = dev->data->dev_private;
732 struct virtqueue *vq = hw->vqs[vq_idx];
733 struct virtnet_rx *rxvq = &vq->rxq;
737 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
739 PMD_INIT_FUNC_TRACE();
741 /* Allocate blank mbufs for each rx descriptor */
744 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
745 for (desc_idx = 0; desc_idx < vq->vq_nentries;
747 vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
748 vq->vq_split.ring.desc[desc_idx].flags =
752 virtio_rxq_vec_setup(rxvq);
755 memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
756 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
757 vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
759 if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
760 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
761 virtio_rxq_rearm_vec(rxvq);
762 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
764 } else if (!virtio_with_packed_queue(vq->hw) && in_order) {
765 if ((!virtqueue_full(vq))) {
766 uint16_t free_cnt = vq->vq_free_cnt;
767 struct rte_mbuf *pkts[free_cnt];
769 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
771 error = virtqueue_enqueue_refill_inorder(vq,
774 if (unlikely(error)) {
775 for (i = 0; i < free_cnt; i++)
776 rte_pktmbuf_free(pkts[i]);
782 vq_update_avail_idx(vq);
785 while (!virtqueue_full(vq)) {
786 m = rte_mbuf_raw_alloc(rxvq->mpool);
790 /* Enqueue allocated buffers */
791 if (virtio_with_packed_queue(vq->hw))
792 error = virtqueue_enqueue_recv_refill_packed_init(vq,
795 error = virtqueue_enqueue_recv_refill(vq,
804 if (!virtio_with_packed_queue(vq->hw))
805 vq_update_avail_idx(vq);
808 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
816 * struct rte_eth_dev *dev: Used to update dev
817 * uint16_t nb_desc: Defaults to values read from config space
818 * unsigned int socket_id: Used to allocate memzone
819 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
820 * uint16_t queue_idx: Just used as an index in dev txq list
823 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
826 unsigned int socket_id __rte_unused,
827 const struct rte_eth_txconf *tx_conf)
829 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
830 struct virtio_hw *hw = dev->data->dev_private;
831 struct virtqueue *vq = hw->vqs[vq_idx];
832 struct virtnet_tx *txvq;
833 uint16_t tx_free_thresh;
835 PMD_INIT_FUNC_TRACE();
837 if (tx_conf->tx_deferred_start) {
838 PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
842 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
843 nb_desc = vq->vq_nentries;
844 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
847 txvq->queue_id = queue_idx;
849 tx_free_thresh = tx_conf->tx_free_thresh;
850 if (tx_free_thresh == 0)
852 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
854 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
855 PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
856 "number of TX entries minus 3 (%u)."
857 " (tx_free_thresh=%u port=%u queue=%u)\n",
859 tx_free_thresh, dev->data->port_id, queue_idx);
863 vq->vq_free_thresh = tx_free_thresh;
865 dev->data->tx_queues[queue_idx] = txvq;
870 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
873 uint8_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
874 struct virtio_hw *hw = dev->data->dev_private;
875 struct virtqueue *vq = hw->vqs[vq_idx];
877 PMD_INIT_FUNC_TRACE();
879 if (!virtio_with_packed_queue(hw)) {
880 if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
881 vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
890 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
894 * Requeue the discarded mbuf. This should always be
895 * successful since it was just dequeued.
897 if (virtio_with_packed_queue(vq->hw))
898 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
900 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
902 if (unlikely(error)) {
903 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
909 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
913 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
914 if (unlikely(error)) {
915 PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
920 /* Optionally fill offload information in the mbuf from the virtio net header */
922 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
924 struct rte_net_hdr_lens hdr_lens;
925 uint32_t hdrlen, ptype;
926 int l4_supported = 0;
929 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
932 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
934 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
935 m->packet_type = ptype;
936 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
937 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
938 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
941 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
942 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
943 if (hdr->csum_start <= hdrlen && l4_supported) {
944 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
946 /* Unknown proto or tunnel, do sw cksum. We can assume
947 * the cksum field is in the first segment since the
948 * buffers we provided to the host are large enough.
949 * In case of SCTP, this will be wrong since it's a CRC
950 * but there's nothing we can do.
952 uint16_t csum = 0, off;
954 if (rte_raw_cksum_mbuf(m, hdr->csum_start,
955 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
958 if (likely(csum != 0xffff))
960 off = hdr->csum_offset + hdr->csum_start;
961 if (rte_pktmbuf_data_len(m) >= off + 1)
962 *rte_pktmbuf_mtod_offset(m, uint16_t *,
965 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
966 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
969 /* GSO request, save required information in mbuf */
970 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
971 /* Check unsupported modes */
972 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
973 (hdr->gso_size == 0)) {
977 /* Update mss lengths in mbuf */
978 m->tso_segsz = hdr->gso_size;
979 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
980 case VIRTIO_NET_HDR_GSO_TCPV4:
981 case VIRTIO_NET_HDR_GSO_TCPV6:
982 m->ol_flags |= PKT_RX_LRO | \
983 PKT_RX_L4_CKSUM_NONE;
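/*
 * Summary of virtio_rx_offload() as a reading aid: VIRTIO_NET_HDR_F_NEEDS_CSUM
 * yields PKT_RX_L4_CKSUM_NONE (or a software-completed checksum for unknown
 * protocols), VIRTIO_NET_HDR_F_DATA_VALID on a supported L4 type yields
 * PKT_RX_L4_CKSUM_GOOD, and a TCPv4/TCPv6 GSO type additionally sets
 * PKT_RX_LRO with tso_segsz taken from gso_size.
 */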
993 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
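/*
 * With the common 64-byte cache line and a 16-byte struct vring_desc this
 * evaluates to 4.  The receive paths below trim each burst so that
 * vq_used_cons_idx ends on a multiple of this value: e.g. a request for 30
 * descriptors with the consumer index at 3 is cut to
 * 30 - ((3 + 30) % 4) = 29, keeping whole descriptor cache lines per burst.
 */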
995 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
997 struct virtnet_rx *rxvq = rx_queue;
998 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
999 struct virtio_hw *hw = vq->hw;
1000 struct rte_mbuf *rxm;
1001 uint16_t nb_used, num, nb_rx;
1002 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1003 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1005 uint32_t i, nb_enqueued;
1007 struct virtio_net_hdr *hdr;
1010 if (unlikely(hw->started == 0))
1013 nb_used = virtqueue_nused(vq);
1015 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1016 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1017 num = VIRTIO_MBUF_BURST_SZ;
1018 if (likely(num > DESC_PER_CACHELINE))
1019 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1021 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1022 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1025 hdr_size = hw->vtnet_hdr_size;
1027 for (i = 0; i < num ; i++) {
1030 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1032 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1033 PMD_RX_LOG(ERR, "Packet drop");
1035 virtio_discard_rxbuf(vq, rxm);
1036 rxvq->stats.errors++;
1040 rxm->port = rxvq->port_id;
1041 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1045 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1046 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1048 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1049 RTE_PKTMBUF_HEADROOM - hdr_size);
1052 rte_vlan_strip(rxm);
1054 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1055 virtio_discard_rxbuf(vq, rxm);
1056 rxvq->stats.errors++;
1060 virtio_rx_stats_updated(rxvq, rxm);
1062 rx_pkts[nb_rx++] = rxm;
1065 rxvq->stats.packets += nb_rx;
1067 /* Allocate new mbuf for the used descriptor */
1068 if (likely(!virtqueue_full(vq))) {
1069 uint16_t free_cnt = vq->vq_free_cnt;
1070 struct rte_mbuf *new_pkts[free_cnt];
1072 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1074 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1076 if (unlikely(error)) {
1077 for (i = 0; i < free_cnt; i++)
1078 rte_pktmbuf_free(new_pkts[i]);
1080 nb_enqueued += free_cnt;
1082 struct rte_eth_dev *dev =
1083 &rte_eth_devices[rxvq->port_id];
1084 dev->data->rx_mbuf_alloc_failed += free_cnt;
1088 if (likely(nb_enqueued)) {
1089 vq_update_avail_idx(vq);
1091 if (unlikely(virtqueue_kick_prepare(vq))) {
1092 virtqueue_notify(vq);
1093 PMD_RX_LOG(DEBUG, "Notified");
1101 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1104 struct virtnet_rx *rxvq = rx_queue;
1105 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1106 struct virtio_hw *hw = vq->hw;
1107 struct rte_mbuf *rxm;
1108 uint16_t num, nb_rx;
1109 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1110 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1112 uint32_t i, nb_enqueued;
1114 struct virtio_net_hdr *hdr;
1117 if (unlikely(hw->started == 0))
1120 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1121 if (likely(num > DESC_PER_CACHELINE))
1122 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1124 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1125 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1128 hdr_size = hw->vtnet_hdr_size;
1130 for (i = 0; i < num; i++) {
1133 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1135 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1136 PMD_RX_LOG(ERR, "Packet drop");
1138 virtio_discard_rxbuf(vq, rxm);
1139 rxvq->stats.errors++;
1143 rxm->port = rxvq->port_id;
1144 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1148 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1149 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1151 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1152 RTE_PKTMBUF_HEADROOM - hdr_size);
1155 rte_vlan_strip(rxm);
1157 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1158 virtio_discard_rxbuf(vq, rxm);
1159 rxvq->stats.errors++;
1163 virtio_rx_stats_updated(rxvq, rxm);
1165 rx_pkts[nb_rx++] = rxm;
1168 rxvq->stats.packets += nb_rx;
1170 /* Allocate new mbuf for the used descriptor */
1171 if (likely(!virtqueue_full(vq))) {
1172 uint16_t free_cnt = vq->vq_free_cnt;
1173 struct rte_mbuf *new_pkts[free_cnt];
1175 if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1177 error = virtqueue_enqueue_recv_refill_packed(vq,
1178 new_pkts, free_cnt);
1179 if (unlikely(error)) {
1180 for (i = 0; i < free_cnt; i++)
1181 rte_pktmbuf_free(new_pkts[i]);
1183 nb_enqueued += free_cnt;
1185 struct rte_eth_dev *dev =
1186 &rte_eth_devices[rxvq->port_id];
1187 dev->data->rx_mbuf_alloc_failed += free_cnt;
1191 if (likely(nb_enqueued)) {
1192 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1193 virtqueue_notify(vq);
1194 PMD_RX_LOG(DEBUG, "Notified");
1203 virtio_recv_pkts_inorder(void *rx_queue,
1204 struct rte_mbuf **rx_pkts,
1207 struct virtnet_rx *rxvq = rx_queue;
1208 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1209 struct virtio_hw *hw = vq->hw;
1210 struct rte_mbuf *rxm;
1211 struct rte_mbuf *prev = NULL;
1212 uint16_t nb_used, num, nb_rx;
1213 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1214 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1216 uint32_t nb_enqueued;
1223 if (unlikely(hw->started == 0))
1226 nb_used = virtqueue_nused(vq);
1227 nb_used = RTE_MIN(nb_used, nb_pkts);
1228 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1230 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1235 hdr_size = hw->vtnet_hdr_size;
1237 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1239 for (i = 0; i < num; i++) {
1240 struct virtio_net_hdr_mrg_rxbuf *header;
1242 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1243 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1247 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1248 PMD_RX_LOG(ERR, "Packet drop");
1250 virtio_discard_rxbuf_inorder(vq, rxm);
1251 rxvq->stats.errors++;
1255 header = (struct virtio_net_hdr_mrg_rxbuf *)
1256 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1259 if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1260 seg_num = header->num_buffers;
1267 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1268 rxm->nb_segs = seg_num;
1271 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1272 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1274 rxm->port = rxvq->port_id;
1276 rx_pkts[nb_rx] = rxm;
1279 if (vq->hw->has_rx_offload &&
1280 virtio_rx_offload(rxm, &header->hdr) < 0) {
1281 virtio_discard_rxbuf_inorder(vq, rxm);
1282 rxvq->stats.errors++;
1287 rte_vlan_strip(rx_pkts[nb_rx]);
1289 seg_res = seg_num - 1;
1291 /* Merge remaining segments */
1292 while (seg_res != 0 && i < (num - 1)) {
1296 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1297 rxm->pkt_len = (uint32_t)(len[i]);
1298 rxm->data_len = (uint16_t)(len[i]);
1300 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1308 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1313 /* Last packet still needs its segments merged */
1314 while (seg_res != 0) {
1315 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1316 VIRTIO_MBUF_BURST_SZ);
1318 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1319 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1321 uint16_t extra_idx = 0;
1324 while (extra_idx < rcv_cnt) {
1325 rxm = rcv_pkts[extra_idx];
1327 RTE_PKTMBUF_HEADROOM - hdr_size;
1328 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1329 rxm->data_len = (uint16_t)(len[extra_idx]);
1332 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1338 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1343 "No enough segments for packet.");
1344 rte_pktmbuf_free(rx_pkts[nb_rx]);
1345 rxvq->stats.errors++;
1350 rxvq->stats.packets += nb_rx;
1352 /* Allocate new mbuf for the used descriptor */
1354 if (likely(!virtqueue_full(vq))) {
1355 /* free_cnt may include mrg descs */
1356 uint16_t free_cnt = vq->vq_free_cnt;
1357 struct rte_mbuf *new_pkts[free_cnt];
1359 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1360 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1362 if (unlikely(error)) {
1363 for (i = 0; i < free_cnt; i++)
1364 rte_pktmbuf_free(new_pkts[i]);
1366 nb_enqueued += free_cnt;
1368 struct rte_eth_dev *dev =
1369 &rte_eth_devices[rxvq->port_id];
1370 dev->data->rx_mbuf_alloc_failed += free_cnt;
1374 if (likely(nb_enqueued)) {
1375 vq_update_avail_idx(vq);
1377 if (unlikely(virtqueue_kick_prepare(vq))) {
1378 virtqueue_notify(vq);
1379 PMD_RX_LOG(DEBUG, "Notified");
1387 virtio_recv_mergeable_pkts(void *rx_queue,
1388 struct rte_mbuf **rx_pkts,
1391 struct virtnet_rx *rxvq = rx_queue;
1392 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1393 struct virtio_hw *hw = vq->hw;
1394 struct rte_mbuf *rxm;
1395 struct rte_mbuf *prev = NULL;
1396 uint16_t nb_used, num, nb_rx = 0;
1397 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1398 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1400 uint32_t nb_enqueued = 0;
1401 uint32_t seg_num = 0;
1402 uint32_t seg_res = 0;
1403 uint32_t hdr_size = hw->vtnet_hdr_size;
1406 if (unlikely(hw->started == 0))
1409 nb_used = virtqueue_nused(vq);
1411 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1413 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1414 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1415 num = VIRTIO_MBUF_BURST_SZ;
1416 if (likely(num > DESC_PER_CACHELINE))
1417 num = num - ((vq->vq_used_cons_idx + num) %
1418 DESC_PER_CACHELINE);
1421 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1423 for (i = 0; i < num; i++) {
1424 struct virtio_net_hdr_mrg_rxbuf *header;
1426 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1427 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1431 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1432 PMD_RX_LOG(ERR, "Packet drop");
1434 virtio_discard_rxbuf(vq, rxm);
1435 rxvq->stats.errors++;
1439 header = (struct virtio_net_hdr_mrg_rxbuf *)
1440 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1442 seg_num = header->num_buffers;
1446 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1447 rxm->nb_segs = seg_num;
1450 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1451 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1453 rxm->port = rxvq->port_id;
1455 rx_pkts[nb_rx] = rxm;
1458 if (hw->has_rx_offload &&
1459 virtio_rx_offload(rxm, &header->hdr) < 0) {
1460 virtio_discard_rxbuf(vq, rxm);
1461 rxvq->stats.errors++;
1466 rte_vlan_strip(rx_pkts[nb_rx]);
1468 seg_res = seg_num - 1;
1470 /* Merge remaining segments */
1471 while (seg_res != 0 && i < (num - 1)) {
1475 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1476 rxm->pkt_len = (uint32_t)(len[i]);
1477 rxm->data_len = (uint16_t)(len[i]);
1479 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1487 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1492 /* Last packet still needs its segments merged */
1493 while (seg_res != 0) {
1494 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1495 VIRTIO_MBUF_BURST_SZ);
1497 if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1498 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1500 uint16_t extra_idx = 0;
1503 while (extra_idx < rcv_cnt) {
1504 rxm = rcv_pkts[extra_idx];
1506 RTE_PKTMBUF_HEADROOM - hdr_size;
1507 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1508 rxm->data_len = (uint16_t)(len[extra_idx]);
1511 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1517 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1522 "No enough segments for packet.");
1523 rte_pktmbuf_free(rx_pkts[nb_rx]);
1524 rxvq->stats.errors++;
1529 rxvq->stats.packets += nb_rx;
1531 /* Allocate new mbuf for the used descriptor */
1532 if (likely(!virtqueue_full(vq))) {
1533 /* free_cnt may include mrg descs */
1534 uint16_t free_cnt = vq->vq_free_cnt;
1535 struct rte_mbuf *new_pkts[free_cnt];
1537 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1538 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1540 if (unlikely(error)) {
1541 for (i = 0; i < free_cnt; i++)
1542 rte_pktmbuf_free(new_pkts[i]);
1544 nb_enqueued += free_cnt;
1546 struct rte_eth_dev *dev =
1547 &rte_eth_devices[rxvq->port_id];
1548 dev->data->rx_mbuf_alloc_failed += free_cnt;
1552 if (likely(nb_enqueued)) {
1553 vq_update_avail_idx(vq);
1555 if (unlikely(virtqueue_kick_prepare(vq))) {
1556 virtqueue_notify(vq);
1557 PMD_RX_LOG(DEBUG, "Notified");
1565 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1566 struct rte_mbuf **rx_pkts,
1569 struct virtnet_rx *rxvq = rx_queue;
1570 struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1571 struct virtio_hw *hw = vq->hw;
1572 struct rte_mbuf *rxm;
1573 struct rte_mbuf *prev = NULL;
1574 uint16_t num, nb_rx = 0;
1575 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1576 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1577 uint32_t nb_enqueued = 0;
1578 uint32_t seg_num = 0;
1579 uint32_t seg_res = 0;
1580 uint32_t hdr_size = hw->vtnet_hdr_size;
1584 if (unlikely(hw->started == 0))
1589 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1590 num = VIRTIO_MBUF_BURST_SZ;
1591 if (likely(num > DESC_PER_CACHELINE))
1592 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1594 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1596 for (i = 0; i < num; i++) {
1597 struct virtio_net_hdr_mrg_rxbuf *header;
1599 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1600 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1604 if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1605 PMD_RX_LOG(ERR, "Packet drop");
1607 virtio_discard_rxbuf(vq, rxm);
1608 rxvq->stats.errors++;
1612 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1613 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1614 seg_num = header->num_buffers;
1619 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1620 rxm->nb_segs = seg_num;
1623 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1624 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1626 rxm->port = rxvq->port_id;
1627 rx_pkts[nb_rx] = rxm;
1630 if (hw->has_rx_offload &&
1631 virtio_rx_offload(rxm, &header->hdr) < 0) {
1632 virtio_discard_rxbuf(vq, rxm);
1633 rxvq->stats.errors++;
1638 rte_vlan_strip(rx_pkts[nb_rx]);
1640 seg_res = seg_num - 1;
1642 /* Merge remaining segments */
1643 while (seg_res != 0 && i < (num - 1)) {
1647 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1648 rxm->pkt_len = (uint32_t)(len[i]);
1649 rxm->data_len = (uint16_t)(len[i]);
1651 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1659 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1664 /* Last packet still needs its segments merged */
1665 while (seg_res != 0) {
1666 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1667 VIRTIO_MBUF_BURST_SZ);
1668 uint16_t extra_idx = 0;
1670 rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1672 if (unlikely(rcv_cnt == 0)) {
1673 PMD_RX_LOG(ERR, "Not enough segments for packet.");
1674 rte_pktmbuf_free(rx_pkts[nb_rx]);
1675 rxvq->stats.errors++;
1679 while (extra_idx < rcv_cnt) {
1680 rxm = rcv_pkts[extra_idx];
1682 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1683 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1684 rxm->data_len = (uint16_t)(len[extra_idx]);
1688 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1693 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1698 rxvq->stats.packets += nb_rx;
1700 /* Allocate new mbuf for the used descriptor */
1701 if (likely(!virtqueue_full(vq))) {
1702 /* free_cnt may include mrg descs */
1703 uint16_t free_cnt = vq->vq_free_cnt;
1704 struct rte_mbuf *new_pkts[free_cnt];
1706 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1707 error = virtqueue_enqueue_recv_refill_packed(vq,
1708 new_pkts, free_cnt);
1709 if (unlikely(error)) {
1710 for (i = 0; i < free_cnt; i++)
1711 rte_pktmbuf_free(new_pkts[i]);
1713 nb_enqueued += free_cnt;
1715 struct rte_eth_dev *dev =
1716 &rte_eth_devices[rxvq->port_id];
1717 dev->data->rx_mbuf_alloc_failed += free_cnt;
1721 if (likely(nb_enqueued)) {
1722 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1723 virtqueue_notify(vq);
1724 PMD_RX_LOG(DEBUG, "Notified");
1732 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1738 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1739 struct rte_mbuf *m = tx_pkts[nb_tx];
1741 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1742 error = rte_validate_tx_offload(m);
1743 if (unlikely(error)) {
1749 /* Do VLAN tag insertion */
1750 if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
1751 error = rte_vlan_insert(&m);
1752 /* rte_vlan_insert() may change pointer
1753 * even in the case of failure
1757 if (unlikely(error)) {
1763 error = rte_net_intel_cksum_prepare(m);
1764 if (unlikely(error)) {
1769 if (m->ol_flags & PKT_TX_TCP_SEG)
1770 virtio_tso_fix_cksum(m);
1777 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1780 struct virtnet_tx *txvq = tx_queue;
1781 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1782 struct virtio_hw *hw = vq->hw;
1783 uint16_t hdr_size = hw->vtnet_hdr_size;
1785 bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1787 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1790 if (unlikely(nb_pkts < 1))
1793 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1795 if (nb_pkts > vq->vq_free_cnt)
1796 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1799 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1800 struct rte_mbuf *txm = tx_pkts[nb_tx];
1801 int can_push = 0, use_indirect = 0, slots, need;
1803 /* optimize ring usage */
1804 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1805 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1806 rte_mbuf_refcnt_read(txm) == 1 &&
1807 RTE_MBUF_DIRECT(txm) &&
1808 txm->nb_segs == 1 &&
1809 rte_pktmbuf_headroom(txm) >= hdr_size &&
1810 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1811 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1813 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1814 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1816 /* How many main ring entries are needed for this Tx?
1818 * any_layout => number of segments
1819 * default => number of segments + 1
1821 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
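/*
 * Example of the accounting above: a 3-segment mbuf that cannot carry the
 * header in its headroom (can_push == 0) and does not use indirect
 * descriptors needs 3 + 1 = 4 slots; the same mbuf with can_push needs 3,
 * and with an indirect descriptor only 1 main ring slot regardless of the
 * segment count.  A positive `need` below is how many descriptors must
 * still be reclaimed by virtio_xmit_cleanup_packed() before this packet fits.
 */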
1822 need = slots - vq->vq_free_cnt;
1824 /* A positive value indicates it needs free vring descriptors */
1825 if (unlikely(need > 0)) {
1826 virtio_xmit_cleanup_packed(vq, need, in_order);
1827 need = slots - vq->vq_free_cnt;
1828 if (unlikely(need > 0)) {
1830 "No free tx descriptors to transmit");
1835 /* Enqueue Packet buffers */
1837 virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1839 virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1843 virtio_update_packet_stats(&txvq->stats, txm);
1846 txvq->stats.packets += nb_tx;
1848 if (likely(nb_tx)) {
1849 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1850 virtqueue_notify(vq);
1851 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1859 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1861 struct virtnet_tx *txvq = tx_queue;
1862 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1863 struct virtio_hw *hw = vq->hw;
1864 uint16_t hdr_size = hw->vtnet_hdr_size;
1865 uint16_t nb_used, nb_tx = 0;
1867 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1870 if (unlikely(nb_pkts < 1))
1873 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1875 nb_used = virtqueue_nused(vq);
1877 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1878 virtio_xmit_cleanup(vq, nb_used);
1880 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1881 struct rte_mbuf *txm = tx_pkts[nb_tx];
1882 int can_push = 0, use_indirect = 0, slots, need;
1884 /* optimize ring usage */
1885 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1886 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1887 rte_mbuf_refcnt_read(txm) == 1 &&
1888 RTE_MBUF_DIRECT(txm) &&
1889 txm->nb_segs == 1 &&
1890 rte_pktmbuf_headroom(txm) >= hdr_size &&
1891 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1892 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1894 else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1895 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1898 /* How many main ring entries are needed for this Tx?
1899 * any_layout => number of segments
1901 * default => number of segments + 1
1903 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1904 need = slots - vq->vq_free_cnt;
1906 /* A positive value indicates it needs free vring descriptors */
1907 if (unlikely(need > 0)) {
1908 nb_used = virtqueue_nused(vq);
1910 need = RTE_MIN(need, (int)nb_used);
1912 virtio_xmit_cleanup(vq, need);
1913 need = slots - vq->vq_free_cnt;
1914 if (unlikely(need > 0)) {
1916 "No free tx descriptors to transmit");
1921 /* Enqueue Packet buffers */
1922 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1925 virtio_update_packet_stats(&txvq->stats, txm);
1928 txvq->stats.packets += nb_tx;
1930 if (likely(nb_tx)) {
1931 vq_update_avail_idx(vq);
1933 if (unlikely(virtqueue_kick_prepare(vq))) {
1934 virtqueue_notify(vq);
1935 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1942 static __rte_always_inline int
1943 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1945 uint16_t nb_used, nb_clean, nb_descs;
1947 nb_descs = vq->vq_free_cnt + need;
1948 nb_used = virtqueue_nused(vq);
1949 nb_clean = RTE_MIN(need, (int)nb_used);
1951 virtio_xmit_cleanup_inorder(vq, nb_clean);
1953 return nb_descs - vq->vq_free_cnt;
1957 virtio_xmit_pkts_inorder(void *tx_queue,
1958 struct rte_mbuf **tx_pkts,
1961 struct virtnet_tx *txvq = tx_queue;
1962 struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1963 struct virtio_hw *hw = vq->hw;
1964 uint16_t hdr_size = hw->vtnet_hdr_size;
1965 uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1966 struct rte_mbuf *inorder_pkts[nb_pkts];
1969 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1972 if (unlikely(nb_pkts < 1))
1976 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1977 nb_used = virtqueue_nused(vq);
1979 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1980 virtio_xmit_cleanup_inorder(vq, nb_used);
1982 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1983 struct rte_mbuf *txm = tx_pkts[nb_tx];
1986 /* optimize ring usage */
1987 if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1988 virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1989 rte_mbuf_refcnt_read(txm) == 1 &&
1990 RTE_MBUF_DIRECT(txm) &&
1991 txm->nb_segs == 1 &&
1992 rte_pktmbuf_headroom(txm) >= hdr_size &&
1993 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1994 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1995 inorder_pkts[nb_inorder_pkts] = txm;
2001 if (nb_inorder_pkts) {
2002 need = nb_inorder_pkts - vq->vq_free_cnt;
2003 if (unlikely(need > 0)) {
2004 need = virtio_xmit_try_cleanup_inorder(vq,
2006 if (unlikely(need > 0)) {
2008 "No free tx descriptors to "
2013 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2015 nb_inorder_pkts = 0;
2018 slots = txm->nb_segs + 1;
2019 need = slots - vq->vq_free_cnt;
2020 if (unlikely(need > 0)) {
2021 need = virtio_xmit_try_cleanup_inorder(vq, slots);
2023 if (unlikely(need > 0)) {
2025 "No free tx descriptors to transmit");
2029 /* Enqueue Packet buffers */
2030 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2032 virtio_update_packet_stats(&txvq->stats, txm);
2035 /* Transmit all inorder packets */
2036 if (nb_inorder_pkts) {
2037 need = nb_inorder_pkts - vq->vq_free_cnt;
2038 if (unlikely(need > 0)) {
2039 need = virtio_xmit_try_cleanup_inorder(vq,
2041 if (unlikely(need > 0)) {
2043 "No free tx descriptors to transmit");
2044 nb_inorder_pkts = vq->vq_free_cnt;
2049 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2053 txvq->stats.packets += nb_tx;
2055 if (likely(nb_tx)) {
2056 vq_update_avail_idx(vq);
2058 if (unlikely(virtqueue_kick_prepare(vq))) {
2059 virtqueue_notify(vq);
2060 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2070 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2071 struct rte_mbuf **rx_pkts __rte_unused,
2072 uint16_t nb_pkts __rte_unused)
2078 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2079 struct rte_mbuf **tx_pkts __rte_unused,
2080 uint16_t nb_pkts __rte_unused)