1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
45 struct virtnet_rx *rxvq = rxq;
46 struct virtqueue *vq = rxvq->vq;
48 return VIRTQUEUE_NUSED(vq) >= offset;
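/* In-order mode returns descriptors in ring order, so freeing a batch only
 * needs the free counter bumped and the tail index moved; no free-chain walk
 * is required.
 */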
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
54 vq->vq_free_cnt += num;
55 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
61 struct vring_desc *dp, *dp_tail;
62 struct vq_desc_extra *dxp;
63 uint16_t desc_idx_last = desc_idx;
65 dp = &vq->vq_ring.desc[desc_idx];
66 dxp = &vq->vq_descx[desc_idx];
67 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 while (dp->flags & VRING_DESC_F_NEXT) {
70 desc_idx_last = dp->next;
71 dp = &vq->vq_ring.desc[dp->next];
77 * We must append the existing free chain, if any, to the end of
78 * the newly freed chain. If the virtqueue was completely used, then
79 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
81 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 vq->vq_desc_head_idx = desc_idx;
84 dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
85 dp_tail->next = desc_idx;
88 vq->vq_desc_tail_idx = desc_idx_last;
89 dp->next = VQ_RING_DESC_CHAIN_END;
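/* Packed-ring variant: completed buffers are identified by id rather than by
 * ring position, so the free list is threaded through the vq_descx[] shadow
 * array instead of through the descriptors themselves.
 */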
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
95 struct vq_desc_extra *dxp;
97 dxp = &vq->vq_descx[id];
98 vq->vq_free_cnt += dxp->ndescs;
100 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101 vq->vq_desc_head_idx = id;
103 vq->vq_descx[vq->vq_desc_tail_idx].next = id;
105 vq->vq_desc_tail_idx = id;
106 dxp->next = VQ_RING_DESC_CHAIN_END;
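/* Packed-ring Rx dequeue: a descriptor is complete when its AVAIL/USED bits
 * match the driver's used_wrap_counter (checked by desc_is_used()); the
 * counter flips each time the consumer index wraps past the ring end.
 */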
110 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
111 struct rte_mbuf **rx_pkts,
115 struct rte_mbuf *cookie;
118 struct vring_packed_desc *desc;
121 desc = vq->ring_packed.desc_packed;
123 for (i = 0; i < num; i++) {
124 used_idx = vq->vq_used_cons_idx;
125 if (!desc_is_used(&desc[used_idx], vq))
127 len[i] = desc[used_idx].len;
128 id = desc[used_idx].id;
129 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
130 if (unlikely(cookie == NULL)) {
131 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
132 vq->vq_used_cons_idx);
135 rte_prefetch0(cookie);
136 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
140 vq->vq_used_cons_idx++;
141 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
142 vq->vq_used_cons_idx -= vq->vq_nentries;
143 vq->used_wrap_counter ^= 1;
151 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
152 uint32_t *len, uint16_t num)
154 struct vring_used_elem *uep;
155 struct rte_mbuf *cookie;
156 uint16_t used_idx, desc_idx;
159 /* Caller does the check */
160 for (i = 0; i < num ; i++) {
161 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
162 uep = &vq->vq_ring.used->ring[used_idx];
163 desc_idx = (uint16_t) uep->id;
165 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
167 if (unlikely(cookie == NULL)) {
168 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
169 vq->vq_used_cons_idx);
173 rte_prefetch0(cookie);
174 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
176 vq->vq_used_cons_idx++;
177 vq_ring_free_chain(vq, desc_idx);
178 vq->vq_descx[desc_idx].cookie = NULL;
185 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
186 struct rte_mbuf **rx_pkts,
190 struct vring_used_elem *uep;
191 struct rte_mbuf *cookie;
192 uint16_t used_idx = 0;
195 if (unlikely(num == 0))
198 for (i = 0; i < num; i++) {
199 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
200 /* Desc idx same as used idx */
201 uep = &vq->vq_ring.used->ring[used_idx];
203 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
205 if (unlikely(cookie == NULL)) {
206 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
207 vq->vq_used_cons_idx);
211 rte_prefetch0(cookie);
212 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
214 vq->vq_used_cons_idx++;
215 vq->vq_descx[used_idx].cookie = NULL;
218 vq_ring_free_inorder(vq, used_idx, i);
222 #ifndef DEFAULT_TX_FREE_THRESH
223 #define DEFAULT_TX_FREE_THRESH 32
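/* Default number of completed Tx slots left outstanding before cleanup;
 * applied by virtio_dev_tx_queue_setup() when the application passes
 * tx_free_thresh == 0.
 */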
226 /* Cleanup from completed transmits. */
228 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
230 uint16_t used_idx, id;
231 uint16_t size = vq->vq_nentries;
232 struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
233 struct vq_desc_extra *dxp;
235 used_idx = vq->vq_used_cons_idx;
236 while (num-- && desc_is_used(&desc[used_idx], vq)) {
237 virtio_rmb(vq->hw->weak_barriers);
238 id = desc[used_idx].id;
239 dxp = &vq->vq_descx[id];
240 vq->vq_used_cons_idx += dxp->ndescs;
241 if (vq->vq_used_cons_idx >= size) {
242 vq->vq_used_cons_idx -= size;
243 vq->used_wrap_counter ^= 1;
245 vq_ring_free_id_packed(vq, id);
246 if (dxp->cookie != NULL) {
247 rte_pktmbuf_free(dxp->cookie);
250 used_idx = vq->vq_used_cons_idx;
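/* Split-ring Tx cleanup: each used-ring entry carries the head index of a
 * completed descriptor chain; free the chain and its mbuf cookie.
 */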
255 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
257 uint16_t i, used_idx, desc_idx;
258 for (i = 0; i < num; i++) {
259 struct vring_used_elem *uep;
260 struct vq_desc_extra *dxp;
262 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
263 uep = &vq->vq_ring.used->ring[used_idx];
265 desc_idx = (uint16_t) uep->id;
266 dxp = &vq->vq_descx[desc_idx];
267 vq->vq_used_cons_idx++;
268 vq_ring_free_chain(vq, desc_idx);
270 if (dxp->cookie != NULL) {
271 rte_pktmbuf_free(dxp->cookie);
277 /* Cleanup from completed inorder transmits. */
279 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
281 uint16_t i, used_idx, desc_idx = 0, last_idx;
282 int16_t free_cnt = 0;
283 struct vq_desc_extra *dxp = NULL;
285 if (unlikely(num == 0))
288 for (i = 0; i < num; i++) {
289 struct vring_used_elem *uep;
291 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
292 uep = &vq->vq_ring.used->ring[used_idx];
293 desc_idx = (uint16_t)uep->id;
295 dxp = &vq->vq_descx[desc_idx];
296 vq->vq_used_cons_idx++;
298 if (dxp->cookie != NULL) {
299 rte_pktmbuf_free(dxp->cookie);
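/* In-order devices complete descriptors sequentially, so everything up to
 * the last descriptor of the newest completion can be reclaimed as one
 * contiguous block below.
 */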
304 last_idx = desc_idx + dxp->ndescs - 1;
305 free_cnt = last_idx - vq->vq_desc_tail_idx;
307 free_cnt += vq->vq_nentries;
309 vq_ring_free_inorder(vq, last_idx, free_cnt);
313 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
314 struct rte_mbuf **cookies,
317 struct vq_desc_extra *dxp;
318 struct virtio_hw *hw = vq->hw;
319 struct vring_desc *start_dp;
320 uint16_t head_idx, idx, i = 0;
322 if (unlikely(vq->vq_free_cnt == 0))
324 if (unlikely(vq->vq_free_cnt < num))
327 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
328 start_dp = vq->vq_ring.desc;
331 idx = head_idx & (vq->vq_nentries - 1);
332 dxp = &vq->vq_descx[idx];
333 dxp->cookie = (void *)cookies[i];
337 VIRTIO_MBUF_ADDR(cookies[i], vq) +
338 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
340 cookies[i]->buf_len -
341 RTE_PKTMBUF_HEADROOM +
343 start_dp[idx].flags = VRING_DESC_F_WRITE;
345 vq_update_avail_ring(vq, idx);
350 vq->vq_desc_head_idx += num;
351 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
356 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
359 struct vq_desc_extra *dxp;
360 struct virtio_hw *hw = vq->hw;
361 struct vring_desc *start_dp = vq->vq_ring.desc;
364 if (unlikely(vq->vq_free_cnt == 0))
366 if (unlikely(vq->vq_free_cnt < num))
369 if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
372 for (i = 0; i < num; i++) {
373 idx = vq->vq_desc_head_idx;
374 dxp = &vq->vq_descx[idx];
375 dxp->cookie = (void *)cookie[i];
379 VIRTIO_MBUF_ADDR(cookie[i], vq) +
380 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
382 cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
384 start_dp[idx].flags = VRING_DESC_F_WRITE;
385 vq->vq_desc_head_idx = start_dp[idx].next;
386 vq_update_avail_ring(vq, idx);
387 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
388 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
393 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
399 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
400 struct rte_mbuf **cookie, uint16_t num)
402 struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
403 uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
404 struct virtio_hw *hw = vq->hw;
405 struct vq_desc_extra *dxp;
409 if (unlikely(vq->vq_free_cnt == 0))
411 if (unlikely(vq->vq_free_cnt < num))
414 for (i = 0; i < num; i++) {
415 idx = vq->vq_avail_idx;
416 dxp = &vq->vq_descx[idx];
417 dxp->cookie = (void *)cookie[i];
420 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
421 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
422 start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
423 + hw->vtnet_hdr_size;
425 vq->vq_desc_head_idx = dxp->next;
426 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
427 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
428 virtio_wmb(hw->weak_barriers);
429 start_dp[idx].flags = flags;
430 if (++vq->vq_avail_idx >= vq->vq_nentries) {
431 vq->vq_avail_idx -= vq->vq_nentries;
432 vq->avail_wrap_counter ^= 1;
433 vq->avail_used_flags =
434 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
435 VRING_DESC_F_USED(!vq->avail_wrap_counter);
436 flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
439 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
443 /* When doing TSO, the IP length is not included in the pseudo header
444 * checksum of the packet given to the PMD, but for virtio it is expected.
448 virtio_tso_fix_cksum(struct rte_mbuf *m)
450 /* common case: header is not fragmented */
451 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
453 struct ipv4_hdr *iph;
454 struct ipv6_hdr *ip6h;
456 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
459 iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
460 th = RTE_PTR_ADD(iph, m->l3_len);
461 if ((iph->version_ihl >> 4) == 4) {
462 iph->hdr_checksum = 0;
463 iph->hdr_checksum = rte_ipv4_cksum(iph);
464 ip_len = iph->total_length;
465 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
468 ip6h = (struct ipv6_hdr *)iph;
469 ip_paylen = ip6h->payload_len;
472 /* calculate the new phdr checksum not including ip_paylen */
473 prev_cksum = th->cksum;
476 tmp = (tmp & 0xffff) + (tmp >> 16);
479 /* replace it in the packet */
480 th->cksum = new_cksum;
485 /* avoid the write when the value is already set, to lessen cache issues */
486 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
487 if ((var) != (val)) \
492 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
493 struct rte_mbuf *cookie,
497 if (cookie->ol_flags & PKT_TX_TCP_SEG)
498 cookie->ol_flags |= PKT_TX_TCP_CKSUM;
500 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
501 case PKT_TX_UDP_CKSUM:
502 hdr->csum_start = cookie->l2_len + cookie->l3_len;
503 hdr->csum_offset = offsetof(struct udp_hdr,
505 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
508 case PKT_TX_TCP_CKSUM:
509 hdr->csum_start = cookie->l2_len + cookie->l3_len;
510 hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
511 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
515 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
516 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
517 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
521 /* TCP Segmentation Offload */
522 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
523 virtio_tso_fix_cksum(cookie);
524 hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
525 VIRTIO_NET_HDR_GSO_TCPV6 :
526 VIRTIO_NET_HDR_GSO_TCPV4;
527 hdr->gso_size = cookie->tso_segsz;
533 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
534 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
535 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
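/* In-order Tx enqueue: callers only pass single-segment mbufs whose headroom
 * can hold the virtio-net header, so each packet consumes exactly one
 * descriptor taken in ring order.
 */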
541 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
542 struct rte_mbuf **cookies,
545 struct vq_desc_extra *dxp;
546 struct virtqueue *vq = txvq->vq;
547 struct vring_desc *start_dp;
548 struct virtio_net_hdr *hdr;
550 uint16_t head_size = vq->hw->vtnet_hdr_size;
553 idx = vq->vq_desc_head_idx;
554 start_dp = vq->vq_ring.desc;
557 idx = idx & (vq->vq_nentries - 1);
558 dxp = &vq->vq_descx[idx];
559 dxp->cookie = (void *)cookies[i];
562 hdr = (struct virtio_net_hdr *)
563 rte_pktmbuf_prepend(cookies[i], head_size);
564 cookies[i]->pkt_len -= head_size;
566 /* if offload is disabled, the header is not zeroed below, so do it now */
567 if (!vq->hw->has_tx_offload) {
568 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
569 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
570 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
571 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
572 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
573 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
576 virtqueue_xmit_offload(hdr, cookies[i],
577 vq->hw->has_tx_offload);
579 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
580 start_dp[idx].len = cookies[i]->data_len;
581 start_dp[idx].flags = 0;
583 vq_update_avail_ring(vq, idx);
589 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
590 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
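/* Packed-ring Tx enqueue: the header either lives in the mbuf headroom
 * (can_push) or in the per-slot tx_hdr area of the reserved memzone; the
 * head descriptor's flags are written last so the device never sees a
 * partially built chain.
 */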
594 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
595 uint16_t needed, int can_push)
597 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
598 struct vq_desc_extra *dxp;
599 struct virtqueue *vq = txvq->vq;
600 struct vring_packed_desc *start_dp, *head_dp;
601 uint16_t idx, id, head_idx, head_flags;
602 uint16_t head_size = vq->hw->vtnet_hdr_size;
603 struct virtio_net_hdr *hdr;
606 id = vq->vq_desc_head_idx;
608 dxp = &vq->vq_descx[id];
609 dxp->ndescs = needed;
610 dxp->cookie = cookie;
612 head_idx = vq->vq_avail_idx;
615 start_dp = vq->ring_packed.desc_packed;
617 head_dp = &vq->ring_packed.desc_packed[idx];
618 head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
619 head_flags |= vq->avail_used_flags;
622 /* prepend cannot fail, checked by caller */
623 hdr = (struct virtio_net_hdr *)
624 rte_pktmbuf_prepend(cookie, head_size);
625 /* rte_pktmbuf_prepend() adds the header size to the packet length,
626 * which is not wanted here; the subtraction below restores the correct size.
628 cookie->pkt_len -= head_size;
630 /* if offload is disabled, the header is not zeroed below, so do it now */
631 if (!vq->hw->has_tx_offload) {
632 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
633 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
634 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
635 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
636 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
637 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
640 /* setup first tx ring slot to point to header
641 * stored in reserved region.
643 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
644 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
645 start_dp[idx].len = vq->hw->vtnet_hdr_size;
646 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
648 if (idx >= vq->vq_nentries) {
649 idx -= vq->vq_nentries;
650 vq->avail_wrap_counter ^= 1;
651 vq->avail_used_flags =
652 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
653 VRING_DESC_F_USED(!vq->avail_wrap_counter);
657 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
662 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
663 start_dp[idx].len = cookie->data_len;
664 if (likely(idx != head_idx)) {
665 flags = cookie->next ? VRING_DESC_F_NEXT : 0;
666 flags |= vq->avail_used_flags;
667 start_dp[idx].flags = flags;
671 if (idx >= vq->vq_nentries) {
672 idx -= vq->vq_nentries;
673 vq->avail_wrap_counter ^= 1;
674 vq->avail_used_flags =
675 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
676 VRING_DESC_F_USED(!vq->avail_wrap_counter);
678 } while ((cookie = cookie->next) != NULL);
680 start_dp[prev].id = id;
682 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
684 vq->vq_desc_head_idx = dxp->next;
685 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
686 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
688 vq->vq_avail_idx = idx;
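/* Make sure every descriptor field is written before the head flags are
 * flipped; the AVAIL/USED bits in the head flags are what make the chain
 * visible to the device.
 */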
690 virtio_wmb(vq->hw->weak_barriers);
691 head_dp->flags = head_flags;
695 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
696 uint16_t needed, int use_indirect, int can_push,
699 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
700 struct vq_desc_extra *dxp;
701 struct virtqueue *vq = txvq->vq;
702 struct vring_desc *start_dp;
703 uint16_t seg_num = cookie->nb_segs;
704 uint16_t head_idx, idx;
705 uint16_t head_size = vq->hw->vtnet_hdr_size;
706 struct virtio_net_hdr *hdr;
708 head_idx = vq->vq_desc_head_idx;
710 dxp = &vq->vq_descx[idx];
711 dxp->cookie = (void *)cookie;
712 dxp->ndescs = needed;
714 start_dp = vq->vq_ring.desc;
717 /* prepend cannot fail, checked by caller */
718 hdr = (struct virtio_net_hdr *)
719 rte_pktmbuf_prepend(cookie, head_size);
720 /* rte_pktmbuf_prepend() adds the header size to the packet length,
721 * which is not wanted here; the subtraction below restores the correct size.
723 cookie->pkt_len -= head_size;
725 /* if offload is disabled, the header is not zeroed below, so do it now */
726 if (!vq->hw->has_tx_offload) {
727 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
728 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
729 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
730 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
731 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
732 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
734 } else if (use_indirect) {
735 /* setup tx ring slot to point to the indirect
736 * descriptor list stored in the reserved region.
738 * The first slot of the indirect ring is already preset
739 * to point to the header in the reserved region.
741 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
742 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
743 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
744 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
745 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
747 /* the loop below fills in the rest of the indirect elements */
748 start_dp = txr[idx].tx_indir;
751 /* setup first tx ring slot to point to header
752 * stored in reserved region.
754 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
755 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
756 start_dp[idx].len = vq->hw->vtnet_hdr_size;
757 start_dp[idx].flags = VRING_DESC_F_NEXT;
758 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
760 idx = start_dp[idx].next;
763 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
766 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
767 start_dp[idx].len = cookie->data_len;
768 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
769 idx = start_dp[idx].next;
770 } while ((cookie = cookie->next) != NULL);
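/* When the indirect path was used, the data chain above was written into
 * txr[head_idx].tx_indir, so only the head slot of the main ring was
 * consumed and the head index advances from that slot's next pointer.
 */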
773 idx = vq->vq_ring.desc[head_idx].next;
775 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
777 vq->vq_desc_head_idx = idx;
778 vq_update_avail_ring(vq, head_idx);
781 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
782 vq->vq_desc_tail_idx = idx;
787 virtio_dev_cq_start(struct rte_eth_dev *dev)
789 struct virtio_hw *hw = dev->data->dev_private;
791 if (hw->cvq && hw->cvq->vq) {
792 rte_spinlock_init(&hw->cvq->lock);
793 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
798 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
801 unsigned int socket_id __rte_unused,
802 const struct rte_eth_rxconf *rx_conf __rte_unused,
803 struct rte_mempool *mp)
805 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
806 struct virtio_hw *hw = dev->data->dev_private;
807 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
808 struct virtnet_rx *rxvq;
810 PMD_INIT_FUNC_TRACE();
812 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
813 nb_desc = vq->vq_nentries;
814 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
817 rxvq->queue_id = queue_idx;
819 if (rxvq->mpool == NULL) {
820 rte_exit(EXIT_FAILURE,
821 "Cannot allocate mbufs for rx virtqueue");
824 dev->data->rx_queues[queue_idx] = rxvq;
830 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
832 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
833 struct virtio_hw *hw = dev->data->dev_private;
834 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
835 struct virtnet_rx *rxvq = &vq->rxq;
840 PMD_INIT_FUNC_TRACE();
842 /* Allocate blank mbufs for each rx descriptor */
845 if (hw->use_simple_rx) {
846 for (desc_idx = 0; desc_idx < vq->vq_nentries;
848 vq->vq_ring.avail->ring[desc_idx] = desc_idx;
849 vq->vq_ring.desc[desc_idx].flags =
853 virtio_rxq_vec_setup(rxvq);
856 memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
857 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
859 vq->sw_ring[vq->vq_nentries + desc_idx] =
863 if (hw->use_simple_rx) {
864 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
865 virtio_rxq_rearm_vec(rxvq);
866 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
868 } else if (hw->use_inorder_rx) {
869 if ((!virtqueue_full(vq))) {
870 uint16_t free_cnt = vq->vq_free_cnt;
871 struct rte_mbuf *pkts[free_cnt];
873 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
875 error = virtqueue_enqueue_refill_inorder(vq,
878 if (unlikely(error)) {
879 for (i = 0; i < free_cnt; i++)
880 rte_pktmbuf_free(pkts[i]);
885 vq_update_avail_idx(vq);
888 while (!virtqueue_full(vq)) {
889 m = rte_mbuf_raw_alloc(rxvq->mpool);
893 /* Enqueue allocated buffers */
894 if (vtpci_packed_queue(vq->hw))
895 error = virtqueue_enqueue_recv_refill_packed(vq,
898 error = virtqueue_enqueue_recv_refill(vq,
907 if (!vtpci_packed_queue(vq->hw))
908 vq_update_avail_idx(vq);
911 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
919 * struct rte_eth_dev *dev: Used to update dev
920 * uint16_t nb_desc: Defaults to values read from config space
921 * unsigned int socket_id: Used to allocate memzone
922 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
923 * uint16_t queue_idx: Just used as an index in dev txq list
926 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
929 unsigned int socket_id __rte_unused,
930 const struct rte_eth_txconf *tx_conf)
932 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
933 struct virtio_hw *hw = dev->data->dev_private;
934 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
935 struct virtnet_tx *txvq;
936 uint16_t tx_free_thresh;
938 PMD_INIT_FUNC_TRACE();
940 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
941 nb_desc = vq->vq_nentries;
942 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
945 txvq->queue_id = queue_idx;
947 tx_free_thresh = tx_conf->tx_free_thresh;
948 if (tx_free_thresh == 0)
950 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
952 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
953 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
954 "number of TX entries minus 3 (%u)."
955 " (tx_free_thresh=%u port=%u queue=%u)\n",
957 tx_free_thresh, dev->data->port_id, queue_idx);
961 vq->vq_free_thresh = tx_free_thresh;
963 dev->data->tx_queues[queue_idx] = txvq;
968 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
971 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
972 struct virtio_hw *hw = dev->data->dev_private;
973 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
975 PMD_INIT_FUNC_TRACE();
977 if (!vtpci_packed_queue(hw)) {
978 if (hw->use_inorder_tx)
979 vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
988 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
992 * Requeue the discarded mbuf. This should always be
993 * successful since it was just dequeued.
995 if (vtpci_packed_queue(vq->hw))
996 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
998 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
1000 if (unlikely(error)) {
1001 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1002 rte_pktmbuf_free(m);
1007 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1011 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1012 if (unlikely(error)) {
1013 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1014 rte_pktmbuf_free(m);
1019 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
1021 uint32_t s = mbuf->pkt_len;
1022 struct ether_addr *ea;
1027 stats->size_bins[1]++;
1028 } else if (s > 64 && s < 1024) {
1031 /* count leading zeros to get floor(log2(s)), then offset into the matching power-of-two bin (bins 2-5 cover 65-1023 bytes) */
1032 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
1033 stats->size_bins[bin]++;
1036 stats->size_bins[0]++;
1038 stats->size_bins[6]++;
1040 stats->size_bins[7]++;
1043 ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
1044 if (is_multicast_ether_addr(ea)) {
1045 if (is_broadcast_ether_addr(ea))
1053 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
1055 VIRTIO_DUMP_PACKET(m, m->data_len);
1057 virtio_update_packet_stats(&rxvq->stats, m);
1060 /* Optionally fill offload information in structure */
1062 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1064 struct rte_net_hdr_lens hdr_lens;
1065 uint32_t hdrlen, ptype;
1066 int l4_supported = 0;
1069 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1072 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1074 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1075 m->packet_type = ptype;
1076 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1077 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1078 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1081 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1082 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1083 if (hdr->csum_start <= hdrlen && l4_supported) {
1084 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1086 /* Unknown proto or tunnel, do sw cksum. We can assume
1087 * the cksum field is in the first segment since the
1088 * buffers we provided to the host are large enough.
1089 * In case of SCTP, this will be wrong since it's a CRC
1090 * but there's nothing we can do.
1092 uint16_t csum = 0, off;
1094 rte_raw_cksum_mbuf(m, hdr->csum_start,
1095 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1097 if (likely(csum != 0xffff))
1099 off = hdr->csum_offset + hdr->csum_start;
1100 if (rte_pktmbuf_data_len(m) >= off + 1)
1101 *rte_pktmbuf_mtod_offset(m, uint16_t *,
1104 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1105 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1108 /* GSO request, save required information in mbuf */
1109 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1110 /* Check unsupported modes */
1111 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1112 (hdr->gso_size == 0)) {
1116 /* Update MSS length in the mbuf */
1117 m->tso_segsz = hdr->gso_size;
1118 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1119 case VIRTIO_NET_HDR_GSO_TCPV4:
1120 case VIRTIO_NET_HDR_GSO_TCPV6:
1121 m->ol_flags |= PKT_RX_LRO | \
1122 PKT_RX_L4_CKSUM_NONE;
1132 #define VIRTIO_MBUF_BURST_SZ 64
1133 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
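/* Rx bursts are trimmed so that (vq_used_cons_idx + num) lands on a
 * DESC_PER_CACHELINE boundary, keeping the next burst from starting in the
 * middle of a partially consumed cache line of descriptors.
 */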
1135 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1137 struct virtnet_rx *rxvq = rx_queue;
1138 struct virtqueue *vq = rxvq->vq;
1139 struct virtio_hw *hw = vq->hw;
1140 struct rte_mbuf *rxm, *new_mbuf;
1141 uint16_t nb_used, num, nb_rx;
1142 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1143 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1145 uint32_t i, nb_enqueued;
1147 struct virtio_net_hdr *hdr;
1150 if (unlikely(hw->started == 0))
1153 nb_used = VIRTQUEUE_NUSED(vq);
1155 virtio_rmb(hw->weak_barriers);
1157 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1158 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1159 num = VIRTIO_MBUF_BURST_SZ;
1160 if (likely(num > DESC_PER_CACHELINE))
1161 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1163 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1164 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1167 hdr_size = hw->vtnet_hdr_size;
1169 for (i = 0; i < num ; i++) {
1172 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1174 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1175 PMD_RX_LOG(ERR, "Packet drop");
1177 virtio_discard_rxbuf(vq, rxm);
1178 rxvq->stats.errors++;
1182 rxm->port = rxvq->port_id;
1183 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1187 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1188 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1190 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1191 RTE_PKTMBUF_HEADROOM - hdr_size);
1194 rte_vlan_strip(rxm);
1196 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1197 virtio_discard_rxbuf(vq, rxm);
1198 rxvq->stats.errors++;
1202 virtio_rx_stats_updated(rxvq, rxm);
1204 rx_pkts[nb_rx++] = rxm;
1207 rxvq->stats.packets += nb_rx;
1209 /* Allocate new mbuf for the used descriptor */
1210 while (likely(!virtqueue_full(vq))) {
1211 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1212 if (unlikely(new_mbuf == NULL)) {
1213 struct rte_eth_dev *dev
1214 = &rte_eth_devices[rxvq->port_id];
1215 dev->data->rx_mbuf_alloc_failed++;
1218 error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
1219 if (unlikely(error)) {
1220 rte_pktmbuf_free(new_mbuf);
1226 if (likely(nb_enqueued)) {
1227 vq_update_avail_idx(vq);
1229 if (unlikely(virtqueue_kick_prepare(vq))) {
1230 virtqueue_notify(vq);
1231 PMD_RX_LOG(DEBUG, "Notified");
1239 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1242 struct virtnet_rx *rxvq = rx_queue;
1243 struct virtqueue *vq = rxvq->vq;
1244 struct virtio_hw *hw = vq->hw;
1245 struct rte_mbuf *rxm, *new_mbuf;
1246 uint16_t num, nb_rx;
1247 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1248 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1250 uint32_t i, nb_enqueued;
1252 struct virtio_net_hdr *hdr;
1255 if (unlikely(hw->started == 0))
1258 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1259 if (likely(num > DESC_PER_CACHELINE))
1260 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1262 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1263 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1266 hdr_size = hw->vtnet_hdr_size;
1268 for (i = 0; i < num; i++) {
1271 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1273 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1274 PMD_RX_LOG(ERR, "Packet drop");
1276 virtio_discard_rxbuf(vq, rxm);
1277 rxvq->stats.errors++;
1281 rxm->port = rxvq->port_id;
1282 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1286 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1287 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1289 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1290 RTE_PKTMBUF_HEADROOM - hdr_size);
1293 rte_vlan_strip(rxm);
1295 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1296 virtio_discard_rxbuf(vq, rxm);
1297 rxvq->stats.errors++;
1301 virtio_rx_stats_updated(rxvq, rxm);
1303 rx_pkts[nb_rx++] = rxm;
1306 rxvq->stats.packets += nb_rx;
1308 /* Allocate new mbuf for the used descriptor */
1309 while (likely(!virtqueue_full(vq))) {
1310 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1311 if (unlikely(new_mbuf == NULL)) {
1312 struct rte_eth_dev *dev =
1313 &rte_eth_devices[rxvq->port_id];
1314 dev->data->rx_mbuf_alloc_failed++;
1317 error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
1318 if (unlikely(error)) {
1319 rte_pktmbuf_free(new_mbuf);
1325 if (likely(nb_enqueued)) {
1326 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1327 virtqueue_notify(vq);
1328 PMD_RX_LOG(DEBUG, "Notified");
1337 virtio_recv_pkts_inorder(void *rx_queue,
1338 struct rte_mbuf **rx_pkts,
1341 struct virtnet_rx *rxvq = rx_queue;
1342 struct virtqueue *vq = rxvq->vq;
1343 struct virtio_hw *hw = vq->hw;
1344 struct rte_mbuf *rxm;
1345 struct rte_mbuf *prev;
1346 uint16_t nb_used, num, nb_rx;
1347 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1348 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1350 uint32_t nb_enqueued;
1357 if (unlikely(hw->started == 0))
1360 nb_used = VIRTQUEUE_NUSED(vq);
1361 nb_used = RTE_MIN(nb_used, nb_pkts);
1362 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1364 virtio_rmb(hw->weak_barriers);
1366 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1371 hdr_size = hw->vtnet_hdr_size;
1373 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1375 for (i = 0; i < num; i++) {
1376 struct virtio_net_hdr_mrg_rxbuf *header;
1378 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1379 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1383 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1384 PMD_RX_LOG(ERR, "Packet drop");
1386 virtio_discard_rxbuf_inorder(vq, rxm);
1387 rxvq->stats.errors++;
1391 header = (struct virtio_net_hdr_mrg_rxbuf *)
1392 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1395 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1396 seg_num = header->num_buffers;
1403 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1404 rxm->nb_segs = seg_num;
1407 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1408 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1410 rxm->port = rxvq->port_id;
1412 rx_pkts[nb_rx] = rxm;
1415 if (vq->hw->has_rx_offload &&
1416 virtio_rx_offload(rxm, &header->hdr) < 0) {
1417 virtio_discard_rxbuf_inorder(vq, rxm);
1418 rxvq->stats.errors++;
1423 rte_vlan_strip(rx_pkts[nb_rx]);
1425 seg_res = seg_num - 1;
1427 /* Merge remaining segments */
1428 while (seg_res != 0 && i < (num - 1)) {
1432 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1433 rxm->pkt_len = (uint32_t)(len[i]);
1434 rxm->data_len = (uint16_t)(len[i]);
1436 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1437 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1447 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1452 /* The last packet may still have remaining segments to merge */
1453 while (seg_res != 0) {
1454 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1455 VIRTIO_MBUF_BURST_SZ);
1457 prev = rcv_pkts[nb_rx];
1458 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1459 virtio_rmb(hw->weak_barriers);
1460 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1462 uint16_t extra_idx = 0;
1465 while (extra_idx < rcv_cnt) {
1466 rxm = rcv_pkts[extra_idx];
1468 RTE_PKTMBUF_HEADROOM - hdr_size;
1469 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1470 rxm->data_len = (uint16_t)(len[extra_idx]);
1473 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1474 rx_pkts[nb_rx]->data_len += len[extra_idx];
1480 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1485 "No enough segments for packet.");
1486 virtio_discard_rxbuf_inorder(vq, prev);
1487 rxvq->stats.errors++;
1492 rxvq->stats.packets += nb_rx;
1494 /* Allocate new mbuf for the used descriptor */
1496 if (likely(!virtqueue_full(vq))) {
1497 /* free_cnt may include mrg descs */
1498 uint16_t free_cnt = vq->vq_free_cnt;
1499 struct rte_mbuf *new_pkts[free_cnt];
1501 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1502 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1504 if (unlikely(error)) {
1505 for (i = 0; i < free_cnt; i++)
1506 rte_pktmbuf_free(new_pkts[i]);
1508 nb_enqueued += free_cnt;
1510 struct rte_eth_dev *dev =
1511 &rte_eth_devices[rxvq->port_id];
1512 dev->data->rx_mbuf_alloc_failed += free_cnt;
1516 if (likely(nb_enqueued)) {
1517 vq_update_avail_idx(vq);
1519 if (unlikely(virtqueue_kick_prepare(vq))) {
1520 virtqueue_notify(vq);
1521 PMD_RX_LOG(DEBUG, "Notified");
1529 virtio_recv_mergeable_pkts(void *rx_queue,
1530 struct rte_mbuf **rx_pkts,
1533 struct virtnet_rx *rxvq = rx_queue;
1534 struct virtqueue *vq = rxvq->vq;
1535 struct virtio_hw *hw = vq->hw;
1536 struct rte_mbuf *rxm;
1537 struct rte_mbuf *prev;
1538 uint16_t nb_used, num, nb_rx = 0;
1539 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1540 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1542 uint32_t nb_enqueued = 0;
1543 uint32_t seg_num = 0;
1544 uint32_t seg_res = 0;
1545 uint32_t hdr_size = hw->vtnet_hdr_size;
1548 if (unlikely(hw->started == 0))
1551 nb_used = VIRTQUEUE_NUSED(vq);
1553 virtio_rmb(hw->weak_barriers);
1555 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1557 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1558 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1559 num = VIRTIO_MBUF_BURST_SZ;
1560 if (likely(num > DESC_PER_CACHELINE))
1561 num = num - ((vq->vq_used_cons_idx + num) %
1562 DESC_PER_CACHELINE);
1565 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1567 for (i = 0; i < num; i++) {
1568 struct virtio_net_hdr_mrg_rxbuf *header;
1570 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1571 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1575 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1576 PMD_RX_LOG(ERR, "Packet drop");
1578 virtio_discard_rxbuf(vq, rxm);
1579 rxvq->stats.errors++;
1583 header = (struct virtio_net_hdr_mrg_rxbuf *)
1584 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1586 seg_num = header->num_buffers;
1590 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1591 rxm->nb_segs = seg_num;
1594 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1595 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1597 rxm->port = rxvq->port_id;
1599 rx_pkts[nb_rx] = rxm;
1602 if (hw->has_rx_offload &&
1603 virtio_rx_offload(rxm, &header->hdr) < 0) {
1604 virtio_discard_rxbuf(vq, rxm);
1605 rxvq->stats.errors++;
1610 rte_vlan_strip(rx_pkts[nb_rx]);
1612 seg_res = seg_num - 1;
1614 /* Merge remaining segments */
1615 while (seg_res != 0 && i < (num - 1)) {
1619 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1620 rxm->pkt_len = (uint32_t)(len[i]);
1621 rxm->data_len = (uint16_t)(len[i]);
1623 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1624 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1634 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1639 /* The last packet may still have remaining segments to merge */
1640 while (seg_res != 0) {
1641 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1642 VIRTIO_MBUF_BURST_SZ);
1644 prev = rcv_pkts[nb_rx];
1645 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1646 virtio_rmb(hw->weak_barriers);
1647 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1649 uint16_t extra_idx = 0;
1652 while (extra_idx < rcv_cnt) {
1653 rxm = rcv_pkts[extra_idx];
1655 RTE_PKTMBUF_HEADROOM - hdr_size;
1656 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1657 rxm->data_len = (uint16_t)(len[extra_idx]);
1660 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1661 rx_pkts[nb_rx]->data_len += len[extra_idx];
1667 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1672 "No enough segments for packet.");
1673 virtio_discard_rxbuf(vq, prev);
1674 rxvq->stats.errors++;
1679 rxvq->stats.packets += nb_rx;
1681 /* Allocate new mbuf for the used descriptor */
1682 if (likely(!virtqueue_full(vq))) {
1683 /* free_cnt may include mrg descs */
1684 uint16_t free_cnt = vq->vq_free_cnt;
1685 struct rte_mbuf *new_pkts[free_cnt];
1687 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1688 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1690 if (unlikely(error)) {
1691 for (i = 0; i < free_cnt; i++)
1692 rte_pktmbuf_free(new_pkts[i]);
1694 nb_enqueued += free_cnt;
1696 struct rte_eth_dev *dev =
1697 &rte_eth_devices[rxvq->port_id];
1698 dev->data->rx_mbuf_alloc_failed += free_cnt;
1702 if (likely(nb_enqueued)) {
1703 vq_update_avail_idx(vq);
1705 if (unlikely(virtqueue_kick_prepare(vq))) {
1706 virtqueue_notify(vq);
1707 PMD_RX_LOG(DEBUG, "Notified");
1715 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1716 struct rte_mbuf **rx_pkts,
1719 struct virtnet_rx *rxvq = rx_queue;
1720 struct virtqueue *vq = rxvq->vq;
1721 struct virtio_hw *hw = vq->hw;
1722 struct rte_mbuf *rxm;
1723 struct rte_mbuf *prev = NULL;
1724 uint16_t num, nb_rx = 0;
1725 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1726 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1727 uint32_t nb_enqueued = 0;
1728 uint32_t seg_num = 0;
1729 uint32_t seg_res = 0;
1730 uint32_t hdr_size = hw->vtnet_hdr_size;
1734 if (unlikely(hw->started == 0))
1739 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1740 num = VIRTIO_MBUF_BURST_SZ;
1741 if (likely(num > DESC_PER_CACHELINE))
1742 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1744 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1746 for (i = 0; i < num; i++) {
1747 struct virtio_net_hdr_mrg_rxbuf *header;
1749 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1750 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1754 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1755 PMD_RX_LOG(ERR, "Packet drop");
1757 virtio_discard_rxbuf(vq, rxm);
1758 rxvq->stats.errors++;
1762 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1763 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1764 seg_num = header->num_buffers;
1769 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1770 rxm->nb_segs = seg_num;
1773 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1774 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1776 rxm->port = rxvq->port_id;
1777 rx_pkts[nb_rx] = rxm;
1780 if (hw->has_rx_offload &&
1781 virtio_rx_offload(rxm, &header->hdr) < 0) {
1782 virtio_discard_rxbuf(vq, rxm);
1783 rxvq->stats.errors++;
1788 rte_vlan_strip(rx_pkts[nb_rx]);
1790 seg_res = seg_num - 1;
1792 /* Merge remaining segments */
1793 while (seg_res != 0 && i < (num - 1)) {
1797 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1798 rxm->pkt_len = (uint32_t)(len[i]);
1799 rxm->data_len = (uint16_t)(len[i]);
1801 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1802 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1812 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1817 /* The last packet may still have remaining segments to merge */
1818 while (seg_res != 0) {
1819 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1820 VIRTIO_MBUF_BURST_SZ);
1821 if (likely(vq->vq_free_cnt >= rcv_cnt)) {
1822 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1824 uint16_t extra_idx = 0;
1828 while (extra_idx < rcv_cnt) {
1829 rxm = rcv_pkts[extra_idx];
1832 RTE_PKTMBUF_HEADROOM - hdr_size;
1833 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1834 rxm->data_len = (uint16_t)(len[extra_idx]);
1838 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1839 rx_pkts[nb_rx]->data_len += len[extra_idx];
1844 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1849 "No enough segments for packet.");
1851 virtio_discard_rxbuf(vq, prev);
1852 rxvq->stats.errors++;
1857 rxvq->stats.packets += nb_rx;
1859 /* Allocate new mbuf for the used descriptor */
1860 if (likely(!virtqueue_full(vq))) {
1861 /* free_cnt may include mrg descs */
1862 uint16_t free_cnt = vq->vq_free_cnt;
1863 struct rte_mbuf *new_pkts[free_cnt];
1865 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1866 error = virtqueue_enqueue_recv_refill_packed(vq,
1867 new_pkts, free_cnt);
1868 if (unlikely(error)) {
1869 for (i = 0; i < free_cnt; i++)
1870 rte_pktmbuf_free(new_pkts[i]);
1872 nb_enqueued += free_cnt;
1874 struct rte_eth_dev *dev =
1875 &rte_eth_devices[rxvq->port_id];
1876 dev->data->rx_mbuf_alloc_failed += free_cnt;
1880 if (likely(nb_enqueued)) {
1881 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1882 virtqueue_notify(vq);
1883 PMD_RX_LOG(DEBUG, "Notified");
1891 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1894 struct virtnet_tx *txvq = tx_queue;
1895 struct virtqueue *vq = txvq->vq;
1896 struct virtio_hw *hw = vq->hw;
1897 uint16_t hdr_size = hw->vtnet_hdr_size;
1901 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1904 if (unlikely(nb_pkts < 1))
1907 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1909 if (nb_pkts > vq->vq_free_cnt)
1910 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
1912 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1913 struct rte_mbuf *txm = tx_pkts[nb_tx];
1914 int can_push = 0, slots, need;
1916 /* Do VLAN tag insertion */
1917 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1918 error = rte_vlan_insert(&txm);
1919 if (unlikely(error)) {
1920 rte_pktmbuf_free(txm);
1925 /* optimize ring usage: push the virtio-net header into the mbuf headroom when possible, saving a descriptor */
1926 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1927 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1928 rte_mbuf_refcnt_read(txm) == 1 &&
1929 RTE_MBUF_DIRECT(txm) &&
1930 txm->nb_segs == 1 &&
1931 rte_pktmbuf_headroom(txm) >= hdr_size &&
1932 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1933 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1936 /* How many main ring entries are needed for this Tx?
1937 * any_layout => number of segments
1938 * default => number of segments + 1
1940 slots = txm->nb_segs + !can_push;
1941 need = slots - vq->vq_free_cnt;
1943 /* A positive value means we are short of free vring descriptors and must reclaim completed ones */
1944 if (unlikely(need > 0)) {
1945 need = RTE_MIN(need, (int)nb_pkts);
1946 virtio_xmit_cleanup_packed(vq, need);
1947 need = slots - vq->vq_free_cnt;
1948 if (unlikely(need > 0)) {
1950 "No free tx descriptors to transmit");
1955 /* Enqueue Packet buffers */
1956 virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
1958 virtio_update_packet_stats(&txvq->stats, txm);
1961 txvq->stats.packets += nb_tx;
1963 if (likely(nb_tx)) {
1964 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1965 virtqueue_notify(vq);
1966 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1974 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1976 struct virtnet_tx *txvq = tx_queue;
1977 struct virtqueue *vq = txvq->vq;
1978 struct virtio_hw *hw = vq->hw;
1979 uint16_t hdr_size = hw->vtnet_hdr_size;
1980 uint16_t nb_used, nb_tx = 0;
1983 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1986 if (unlikely(nb_pkts < 1))
1989 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1990 nb_used = VIRTQUEUE_NUSED(vq);
1992 virtio_rmb(hw->weak_barriers);
1993 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1994 virtio_xmit_cleanup(vq, nb_used);
1996 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1997 struct rte_mbuf *txm = tx_pkts[nb_tx];
1998 int can_push = 0, use_indirect = 0, slots, need;
2000 /* Do VLAN tag insertion */
2001 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2002 error = rte_vlan_insert(&txm);
2003 if (unlikely(error)) {
2004 rte_pktmbuf_free(txm);
2009 /* optimize ring usage: push the virtio-net header into the mbuf headroom when possible, saving a descriptor */
2010 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2011 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2012 rte_mbuf_refcnt_read(txm) == 1 &&
2013 RTE_MBUF_DIRECT(txm) &&
2014 txm->nb_segs == 1 &&
2015 rte_pktmbuf_headroom(txm) >= hdr_size &&
2016 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2017 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2019 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2020 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2023 /* How many main ring entries are needed for this Tx?
2024 * any_layout => number of segments
2025 * indirect => 1
2026 * default => number of segments + 1
2028 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2029 need = slots - vq->vq_free_cnt;
2031 /* A positive value means we are short of free vring descriptors and must reclaim completed ones */
2032 if (unlikely(need > 0)) {
2033 nb_used = VIRTQUEUE_NUSED(vq);
2034 virtio_rmb(hw->weak_barriers);
2035 need = RTE_MIN(need, (int)nb_used);
2037 virtio_xmit_cleanup(vq, need);
2038 need = slots - vq->vq_free_cnt;
2039 if (unlikely(need > 0)) {
2041 "No free tx descriptors to transmit");
2046 /* Enqueue Packet buffers */
2047 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2050 virtio_update_packet_stats(&txvq->stats, txm);
2053 txvq->stats.packets += nb_tx;
2055 if (likely(nb_tx)) {
2056 vq_update_avail_idx(vq);
2058 if (unlikely(virtqueue_kick_prepare(vq))) {
2059 virtqueue_notify(vq);
2060 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2068 virtio_xmit_pkts_inorder(void *tx_queue,
2069 struct rte_mbuf **tx_pkts,
2072 struct virtnet_tx *txvq = tx_queue;
2073 struct virtqueue *vq = txvq->vq;
2074 struct virtio_hw *hw = vq->hw;
2075 uint16_t hdr_size = hw->vtnet_hdr_size;
2076 uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
2077 struct rte_mbuf *inorder_pkts[nb_pkts];
2080 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2083 if (unlikely(nb_pkts < 1))
2087 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2088 nb_used = VIRTQUEUE_NUSED(vq);
2090 virtio_rmb(hw->weak_barriers);
2091 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2092 virtio_xmit_cleanup_inorder(vq, nb_used);
2094 if (unlikely(!vq->vq_free_cnt))
2095 virtio_xmit_cleanup_inorder(vq, nb_used);
2097 nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
2099 for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
2100 struct rte_mbuf *txm = tx_pkts[nb_tx];
2103 /* Do VLAN tag insertion */
2104 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2105 error = rte_vlan_insert(&txm);
2106 if (unlikely(error)) {
2107 rte_pktmbuf_free(txm);
2112 /* optimize ring usage: push the virtio-net header into the mbuf headroom when possible, saving a descriptor */
2113 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2114 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2115 rte_mbuf_refcnt_read(txm) == 1 &&
2116 RTE_MBUF_DIRECT(txm) &&
2117 txm->nb_segs == 1 &&
2118 rte_pktmbuf_headroom(txm) >= hdr_size &&
2119 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2120 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2121 inorder_pkts[nb_inorder_pkts] = txm;
2124 virtio_update_packet_stats(&txvq->stats, txm);
2128 if (nb_inorder_pkts) {
2129 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2131 nb_inorder_pkts = 0;
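/* Packets that cannot be pushed go through the generic enqueue path and
 * need one extra slot for the separate virtio-net header descriptor.
 */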
2134 slots = txm->nb_segs + 1;
2135 need = slots - vq->vq_free_cnt;
2136 if (unlikely(need > 0)) {
2137 nb_used = VIRTQUEUE_NUSED(vq);
2138 virtio_rmb(hw->weak_barriers);
2139 need = RTE_MIN(need, (int)nb_used);
2141 virtio_xmit_cleanup_inorder(vq, need);
2143 need = slots - vq->vq_free_cnt;
2145 if (unlikely(need > 0)) {
2147 "No free tx descriptors to transmit");
2151 /* Enqueue Packet buffers */
2152 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2154 virtio_update_packet_stats(&txvq->stats, txm);
2157 /* Transmit all inorder packets */
2158 if (nb_inorder_pkts)
2159 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2162 txvq->stats.packets += nb_tx;
2164 if (likely(nb_tx)) {
2165 vq_update_avail_idx(vq);
2167 if (unlikely(virtqueue_kick_prepare(vq))) {
2168 virtqueue_notify(vq);
2169 PMD_TX_LOG(DEBUG, "Notified backend after xmit");