1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
43 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
45 struct virtnet_rx *rxvq = rxq;
46 struct virtqueue *vq = rxvq->vq;
48 return VIRTQUEUE_NUSED(vq) >= offset;
52 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
54 vq->vq_free_cnt += num;
55 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
59 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
61 struct vring_desc *dp, *dp_tail;
62 struct vq_desc_extra *dxp;
63 uint16_t desc_idx_last = desc_idx;
65 dp = &vq->vq_ring.desc[desc_idx];
66 dxp = &vq->vq_descx[desc_idx];
67 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
68 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
69 while (dp->flags & VRING_DESC_F_NEXT) {
70 desc_idx_last = dp->next;
71 dp = &vq->vq_ring.desc[dp->next];
76 /*
77 * We must append the existing free chain, if any, to the end of the
78 * newly freed chain. If the virtqueue was completely used, then
79 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80 */
81 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
82 vq->vq_desc_head_idx = desc_idx;
84 dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
85 dp_tail->next = desc_idx;
88 vq->vq_desc_tail_idx = desc_idx_last;
89 dp->next = VQ_RING_DESC_CHAIN_END;
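/*
 * Illustrative example of the free-chain bookkeeping above (index values
 * are assumptions, not taken from a real run): suppose descriptors
 * 3 -> 4 -> 5 are being freed while the current free list ends at tail
 * index 9. Then desc[9].next is pointed at 3, the freed chain keeps its
 * internal next links, vq_desc_tail_idx becomes 5 and desc[5].next is
 * terminated with VQ_RING_DESC_CHAIN_END. If the free list was empty
 * (vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END), the head is simply set
 * to 3 instead.
 */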
93 vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
95 struct vq_desc_extra *dxp;
97 dxp = &vq->vq_descx[id];
98 vq->vq_free_cnt += dxp->ndescs;
100 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
101 vq->vq_desc_head_idx = id;
103 vq->vq_descx[vq->vq_desc_tail_idx].next = id;
105 vq->vq_desc_tail_idx = id;
106 dxp->next = VQ_RING_DESC_CHAIN_END;
110 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
111 struct rte_mbuf **rx_pkts,
115 struct rte_mbuf *cookie;
118 struct vring_packed_desc *desc;
121 desc = vq->ring_packed.desc_packed;
123 for (i = 0; i < num; i++) {
124 used_idx = vq->vq_used_cons_idx;
125 if (!desc_is_used(&desc[used_idx], vq))
127 virtio_rmb(vq->hw->weak_barriers);
128 len[i] = desc[used_idx].len;
129 id = desc[used_idx].id;
130 cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
131 if (unlikely(cookie == NULL)) {
132 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
133 vq->vq_used_cons_idx);
136 rte_prefetch0(cookie);
137 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
141 vq->vq_used_cons_idx++;
142 if (vq->vq_used_cons_idx >= vq->vq_nentries) {
143 vq->vq_used_cons_idx -= vq->vq_nentries;
144 vq->used_wrap_counter ^= 1;
152 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
153 uint32_t *len, uint16_t num)
155 struct vring_used_elem *uep;
156 struct rte_mbuf *cookie;
157 uint16_t used_idx, desc_idx;
160 /* Caller does the check */
161 for (i = 0; i < num ; i++) {
162 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
163 uep = &vq->vq_ring.used->ring[used_idx];
164 desc_idx = (uint16_t) uep->id;
166 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
168 if (unlikely(cookie == NULL)) {
169 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
170 vq->vq_used_cons_idx);
174 rte_prefetch0(cookie);
175 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
177 vq->vq_used_cons_idx++;
178 vq_ring_free_chain(vq, desc_idx);
179 vq->vq_descx[desc_idx].cookie = NULL;
186 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
187 struct rte_mbuf **rx_pkts,
191 struct vring_used_elem *uep;
192 struct rte_mbuf *cookie;
193 uint16_t used_idx = 0;
196 if (unlikely(num == 0))
199 for (i = 0; i < num; i++) {
200 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
201 /* Desc idx same as used idx */
202 uep = &vq->vq_ring.used->ring[used_idx];
204 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
206 if (unlikely(cookie == NULL)) {
207 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
208 vq->vq_used_cons_idx);
212 rte_prefetch0(cookie);
213 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
215 vq->vq_used_cons_idx++;
216 vq->vq_descx[used_idx].cookie = NULL;
219 vq_ring_free_inorder(vq, used_idx, i);
223 #ifndef DEFAULT_TX_FREE_THRESH
224 #define DEFAULT_TX_FREE_THRESH 32
227 /* Cleanup from completed transmits. */
229 virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
231 uint16_t used_idx, id;
232 uint16_t size = vq->vq_nentries;
233 struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
234 struct vq_desc_extra *dxp;
236 used_idx = vq->vq_used_cons_idx;
237 while (num-- && desc_is_used(&desc[used_idx], vq)) {
238 virtio_rmb(vq->hw->weak_barriers);
239 id = desc[used_idx].id;
240 dxp = &vq->vq_descx[id];
241 vq->vq_used_cons_idx += dxp->ndescs;
242 if (vq->vq_used_cons_idx >= size) {
243 vq->vq_used_cons_idx -= size;
244 vq->used_wrap_counter ^= 1;
246 vq_ring_free_id_packed(vq, id);
247 if (dxp->cookie != NULL) {
248 rte_pktmbuf_free(dxp->cookie);
251 used_idx = vq->vq_used_cons_idx;
256 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
258 uint16_t i, used_idx, desc_idx;
259 for (i = 0; i < num; i++) {
260 struct vring_used_elem *uep;
261 struct vq_desc_extra *dxp;
263 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
264 uep = &vq->vq_ring.used->ring[used_idx];
266 desc_idx = (uint16_t) uep->id;
267 dxp = &vq->vq_descx[desc_idx];
268 vq->vq_used_cons_idx++;
269 vq_ring_free_chain(vq, desc_idx);
271 if (dxp->cookie != NULL) {
272 rte_pktmbuf_free(dxp->cookie);
278 /* Cleanup from completed inorder transmits. */
280 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
282 uint16_t i, idx = vq->vq_used_cons_idx;
283 int16_t free_cnt = 0;
284 struct vq_desc_extra *dxp = NULL;
286 if (unlikely(num == 0))
289 for (i = 0; i < num; i++) {
290 dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
291 free_cnt += dxp->ndescs;
292 if (dxp->cookie != NULL) {
293 rte_pktmbuf_free(dxp->cookie);
298 vq->vq_free_cnt += free_cnt;
299 vq->vq_used_cons_idx = idx;
303 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
304 struct rte_mbuf **cookies,
307 struct vq_desc_extra *dxp;
308 struct virtio_hw *hw = vq->hw;
309 struct vring_desc *start_dp;
310 uint16_t head_idx, idx, i = 0;
312 if (unlikely(vq->vq_free_cnt == 0))
314 if (unlikely(vq->vq_free_cnt < num))
317 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
318 start_dp = vq->vq_ring.desc;
321 idx = head_idx & (vq->vq_nentries - 1);
322 dxp = &vq->vq_descx[idx];
323 dxp->cookie = (void *)cookies[i];
327 VIRTIO_MBUF_ADDR(cookies[i], vq) +
328 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
330 cookies[i]->buf_len -
331 RTE_PKTMBUF_HEADROOM +
333 start_dp[idx].flags = VRING_DESC_F_WRITE;
335 vq_update_avail_ring(vq, idx);
340 vq->vq_desc_head_idx += num;
341 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
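/*
 * Note on the address/length math above (offsets are illustrative
 * assumptions): the virtio-net header is placed in the mbuf headroom,
 * directly in front of the packet data. With a 128-byte
 * RTE_PKTMBUF_HEADROOM and a 12-byte vtnet header, the descriptor points
 * at the buffer address (as seen by the device) plus 128 - 12 and covers
 * buf_len - 128 + 12 bytes, so the host writes the header and the frame
 * back to back into the same mbuf.
 */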
346 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
349 struct vq_desc_extra *dxp;
350 struct virtio_hw *hw = vq->hw;
351 struct vring_desc *start_dp = vq->vq_ring.desc;
354 if (unlikely(vq->vq_free_cnt == 0))
356 if (unlikely(vq->vq_free_cnt < num))
359 if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
362 for (i = 0; i < num; i++) {
363 idx = vq->vq_desc_head_idx;
364 dxp = &vq->vq_descx[idx];
365 dxp->cookie = (void *)cookie[i];
369 VIRTIO_MBUF_ADDR(cookie[i], vq) +
370 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
372 cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
374 start_dp[idx].flags = VRING_DESC_F_WRITE;
375 vq->vq_desc_head_idx = start_dp[idx].next;
376 vq_update_avail_ring(vq, idx);
377 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
378 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
383 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
389 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
390 struct rte_mbuf **cookie, uint16_t num)
392 struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
393 uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
394 struct virtio_hw *hw = vq->hw;
395 struct vq_desc_extra *dxp;
399 if (unlikely(vq->vq_free_cnt == 0))
401 if (unlikely(vq->vq_free_cnt < num))
404 for (i = 0; i < num; i++) {
405 idx = vq->vq_avail_idx;
406 dxp = &vq->vq_descx[idx];
407 dxp->cookie = (void *)cookie[i];
410 start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
411 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
412 start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
413 + hw->vtnet_hdr_size;
415 vq->vq_desc_head_idx = dxp->next;
416 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
417 vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
418 virtio_wmb(hw->weak_barriers);
419 start_dp[idx].flags = flags;
420 if (++vq->vq_avail_idx >= vq->vq_nentries) {
421 vq->vq_avail_idx -= vq->vq_nentries;
422 vq->avail_wrap_counter ^= 1;
423 vq->avail_used_flags =
424 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
425 VRING_DESC_F_USED(!vq->avail_wrap_counter);
426 flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
429 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
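/*
 * Sketch of the packed-ring flag handling above (per the virtio 1.1
 * packed ring layout): while avail_wrap_counter is 1, new descriptors
 * are exposed with AVAIL=1/USED=0; once vq_avail_idx wraps past
 * vq_nentries the counter flips and descriptors are written with
 * AVAIL=0/USED=1 instead. The device marks a descriptor used by setting
 * both bits equal to its own wrap counter, which is what desc_is_used()
 * tests on the dequeue side. The virtio_wmb() ensures addr/len are
 * visible before the flags expose the descriptor.
 */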
433 /* When doing TSO, the IP length is not included in the pseudo header
434 * checksum of the packet given to the PMD, but for virtio it is
435 * expected.
436 */
438 virtio_tso_fix_cksum(struct rte_mbuf *m)
440 /* common case: header is not fragmented */
441 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
443 struct ipv4_hdr *iph;
444 struct ipv6_hdr *ip6h;
445 struct tcp_hdr *th;
446 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
447 uint32_t tmp;
449 iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
450 th = RTE_PTR_ADD(iph, m->l3_len);
451 if ((iph->version_ihl >> 4) == 4) {
452 iph->hdr_checksum = 0;
453 iph->hdr_checksum = rte_ipv4_cksum(iph);
454 ip_len = iph->total_length;
455 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
458 ip6h = (struct ipv6_hdr *)iph;
459 ip_paylen = ip6h->payload_len;
462 /* calculate the new phdr checksum, now including ip_paylen */
463 prev_cksum = th->cksum;
464 tmp = prev_cksum;
465 tmp += ip_paylen;
466 tmp = (tmp & 0xffff) + (tmp >> 16);
467 new_cksum = tmp;
469 /* replace it in the packet */
470 th->cksum = new_cksum;
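/*
 * Worked example of the fix above (numbers are illustrative only): if the
 * stack provided a pseudo-header checksum of 0xfff0 and ip_paylen is
 * 0x0020, the 32-bit sum is 0x10010; folding the carry (0x0010 + 0x1)
 * gives 0x0011, which is written back to th->cksum so the host sees a
 * pseudo-header checksum that already covers the L4 length, as virtio
 * expects.
 */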
475 /* avoid the write operation when it is not needed, to lessen cache issues */
476 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
477 if ((var) != (val)) \
478 (var) = (val); \
479 } while (0)
482 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
483 struct rte_mbuf *cookie,
487 if (cookie->ol_flags & PKT_TX_TCP_SEG)
488 cookie->ol_flags |= PKT_TX_TCP_CKSUM;
490 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
491 case PKT_TX_UDP_CKSUM:
492 hdr->csum_start = cookie->l2_len + cookie->l3_len;
493 hdr->csum_offset = offsetof(struct udp_hdr,
495 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
498 case PKT_TX_TCP_CKSUM:
499 hdr->csum_start = cookie->l2_len + cookie->l3_len;
500 hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
501 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
505 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
506 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
507 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
511 /* TCP Segmentation Offload */
512 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
513 virtio_tso_fix_cksum(cookie);
514 hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
515 VIRTIO_NET_HDR_GSO_TCPV6 :
516 VIRTIO_NET_HDR_GSO_TCPV4;
517 hdr->gso_size = cookie->tso_segsz;
523 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
524 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
525 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
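/*
 * Illustration of the checksum fields set above (header sizes are example
 * values): for TCP over IPv4 with a 14-byte Ethernet header and a 20-byte
 * IP header, csum_start is 34 (l2_len + l3_len) and csum_offset is 16
 * (offsetof(struct tcp_hdr, cksum)), so the host computes the L4 checksum
 * from byte 34 onwards and stores it at byte 50 of the frame.
 */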
531 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
532 struct rte_mbuf **cookies,
535 struct vq_desc_extra *dxp;
536 struct virtqueue *vq = txvq->vq;
537 struct vring_desc *start_dp;
538 struct virtio_net_hdr *hdr;
540 uint16_t head_size = vq->hw->vtnet_hdr_size;
543 idx = vq->vq_desc_head_idx;
544 start_dp = vq->vq_ring.desc;
547 idx = idx & (vq->vq_nentries - 1);
548 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
549 dxp->cookie = (void *)cookies[i];
552 hdr = (struct virtio_net_hdr *)
553 rte_pktmbuf_prepend(cookies[i], head_size);
554 cookies[i]->pkt_len -= head_size;
556 /* if offload is disabled, the header is not zeroed below, so do it now */
557 if (!vq->hw->has_tx_offload) {
558 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
559 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
560 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
561 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
562 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
563 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
566 virtqueue_xmit_offload(hdr, cookies[i],
567 vq->hw->has_tx_offload);
569 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
570 start_dp[idx].len = cookies[i]->data_len;
571 start_dp[idx].flags = 0;
573 vq_update_avail_ring(vq, idx);
579 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
580 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
584 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
585 uint16_t needed, int can_push)
587 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
588 struct vq_desc_extra *dxp;
589 struct virtqueue *vq = txvq->vq;
590 struct vring_packed_desc *start_dp, *head_dp;
591 uint16_t idx, id, head_idx, head_flags;
592 uint16_t head_size = vq->hw->vtnet_hdr_size;
593 struct virtio_net_hdr *hdr;
596 id = vq->vq_desc_head_idx;
598 dxp = &vq->vq_descx[id];
599 dxp->ndescs = needed;
600 dxp->cookie = cookie;
602 head_idx = vq->vq_avail_idx;
605 start_dp = vq->ring_packed.desc_packed;
607 head_dp = &vq->ring_packed.desc_packed[idx];
608 head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
609 head_flags |= vq->avail_used_flags;
612 /* prepend cannot fail, checked by caller */
613 hdr = (struct virtio_net_hdr *)
614 rte_pktmbuf_prepend(cookie, head_size);
615 /* rte_pktmbuf_prepend() counts the hdr size in the pkt length,
616 * which is wrong; the subtraction below restores the correct pkt size.
617 */
618 cookie->pkt_len -= head_size;
620 /* if offload is disabled, the header is not zeroed below, so do it now */
621 if (!vq->hw->has_tx_offload) {
622 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
623 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
624 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
625 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
626 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
627 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
630 /* setup first tx ring slot to point to header
631 * stored in reserved region.
633 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
634 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
635 start_dp[idx].len = vq->hw->vtnet_hdr_size;
636 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
638 if (idx >= vq->vq_nentries) {
639 idx -= vq->vq_nentries;
640 vq->avail_wrap_counter ^= 1;
641 vq->avail_used_flags =
642 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
643 VRING_DESC_F_USED(!vq->avail_wrap_counter);
647 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
652 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
653 start_dp[idx].len = cookie->data_len;
654 if (likely(idx != head_idx)) {
655 flags = cookie->next ? VRING_DESC_F_NEXT : 0;
656 flags |= vq->avail_used_flags;
657 start_dp[idx].flags = flags;
661 if (idx >= vq->vq_nentries) {
662 idx -= vq->vq_nentries;
663 vq->avail_wrap_counter ^= 1;
664 vq->avail_used_flags =
665 VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
666 VRING_DESC_F_USED(!vq->avail_wrap_counter);
668 } while ((cookie = cookie->next) != NULL);
670 start_dp[prev].id = id;
672 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
674 vq->vq_desc_head_idx = dxp->next;
675 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
676 vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
678 vq->vq_avail_idx = idx;
680 virtio_wmb(vq->hw->weak_barriers);
681 head_dp->flags = head_flags;
685 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
686 uint16_t needed, int use_indirect, int can_push,
689 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
690 struct vq_desc_extra *dxp;
691 struct virtqueue *vq = txvq->vq;
692 struct vring_desc *start_dp;
693 uint16_t seg_num = cookie->nb_segs;
694 uint16_t head_idx, idx;
695 uint16_t head_size = vq->hw->vtnet_hdr_size;
696 struct virtio_net_hdr *hdr;
698 head_idx = vq->vq_desc_head_idx;
701 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
703 dxp = &vq->vq_descx[idx];
704 dxp->cookie = (void *)cookie;
705 dxp->ndescs = needed;
707 start_dp = vq->vq_ring.desc;
710 /* prepend cannot fail, checked by caller */
711 hdr = (struct virtio_net_hdr *)
712 rte_pktmbuf_prepend(cookie, head_size);
713 /* rte_pktmbuf_prepend() counts the hdr size in the pkt length,
714 * which is wrong; the subtraction below restores the correct pkt size.
715 */
716 cookie->pkt_len -= head_size;
718 /* if offload is disabled, the header is not zeroed below, so do it now */
719 if (!vq->hw->has_tx_offload) {
720 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
721 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
722 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
723 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
724 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
725 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
727 } else if (use_indirect) {
728 /* setup tx ring slot to point to indirect
729 * descriptor list stored in reserved region.
731 * The first slot in the indirect ring is already preset
732 * to point to the header in the reserved region.
733 */
734 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
735 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
736 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
737 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
738 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
740 /* the loop below will fill in the rest of the indirect elements */
741 start_dp = txr[idx].tx_indir;
744 /* setup first tx ring slot to point to header
745 * stored in reserved region.
747 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
748 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
749 start_dp[idx].len = vq->hw->vtnet_hdr_size;
750 start_dp[idx].flags = VRING_DESC_F_NEXT;
751 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
753 idx = start_dp[idx].next;
756 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
759 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
760 start_dp[idx].len = cookie->data_len;
761 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
762 idx = start_dp[idx].next;
763 } while ((cookie = cookie->next) != NULL);
766 idx = vq->vq_ring.desc[head_idx].next;
768 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
770 vq->vq_desc_head_idx = idx;
771 vq_update_avail_ring(vq, head_idx);
774 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
775 vq->vq_desc_tail_idx = idx;
780 virtio_dev_cq_start(struct rte_eth_dev *dev)
782 struct virtio_hw *hw = dev->data->dev_private;
784 if (hw->cvq && hw->cvq->vq) {
785 rte_spinlock_init(&hw->cvq->lock);
786 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
791 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
794 unsigned int socket_id __rte_unused,
795 const struct rte_eth_rxconf *rx_conf __rte_unused,
796 struct rte_mempool *mp)
798 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
799 struct virtio_hw *hw = dev->data->dev_private;
800 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
801 struct virtnet_rx *rxvq;
803 PMD_INIT_FUNC_TRACE();
805 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
806 nb_desc = vq->vq_nentries;
807 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
810 rxvq->queue_id = queue_idx;
812 if (rxvq->mpool == NULL) {
813 rte_exit(EXIT_FAILURE,
814 "Cannot allocate mbufs for rx virtqueue");
817 dev->data->rx_queues[queue_idx] = rxvq;
823 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
825 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
826 struct virtio_hw *hw = dev->data->dev_private;
827 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
828 struct virtnet_rx *rxvq = &vq->rxq;
833 PMD_INIT_FUNC_TRACE();
835 /* Allocate blank mbufs for each rx descriptor */
838 if (hw->use_simple_rx) {
839 for (desc_idx = 0; desc_idx < vq->vq_nentries;
841 vq->vq_ring.avail->ring[desc_idx] = desc_idx;
842 vq->vq_ring.desc[desc_idx].flags =
846 virtio_rxq_vec_setup(rxvq);
849 memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
850 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
852 vq->sw_ring[vq->vq_nentries + desc_idx] =
856 if (hw->use_simple_rx) {
857 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
858 virtio_rxq_rearm_vec(rxvq);
859 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
861 } else if (hw->use_inorder_rx) {
862 if ((!virtqueue_full(vq))) {
863 uint16_t free_cnt = vq->vq_free_cnt;
864 struct rte_mbuf *pkts[free_cnt];
866 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
868 error = virtqueue_enqueue_refill_inorder(vq,
871 if (unlikely(error)) {
872 for (i = 0; i < free_cnt; i++)
873 rte_pktmbuf_free(pkts[i]);
878 vq_update_avail_idx(vq);
881 while (!virtqueue_full(vq)) {
882 m = rte_mbuf_raw_alloc(rxvq->mpool);
886 /* Enqueue allocated buffers */
887 if (vtpci_packed_queue(vq->hw))
888 error = virtqueue_enqueue_recv_refill_packed(vq,
891 error = virtqueue_enqueue_recv_refill(vq,
900 if (!vtpci_packed_queue(vq->hw))
901 vq_update_avail_idx(vq);
904 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
912 * struct rte_eth_dev *dev: Used to update dev
913 * uint16_t nb_desc: Defaults to values read from config space
914 * unsigned int socket_id: Used to allocate memzone
915 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
916 * uint16_t queue_idx: Just used as an index in dev txq list
919 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
922 unsigned int socket_id __rte_unused,
923 const struct rte_eth_txconf *tx_conf)
925 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
926 struct virtio_hw *hw = dev->data->dev_private;
927 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
928 struct virtnet_tx *txvq;
929 uint16_t tx_free_thresh;
931 PMD_INIT_FUNC_TRACE();
933 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
934 nb_desc = vq->vq_nentries;
935 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
938 txvq->queue_id = queue_idx;
940 tx_free_thresh = tx_conf->tx_free_thresh;
941 if (tx_free_thresh == 0)
943 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
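/*
 * Example of the default threshold above (the ring size is an assumed
 * value): with a 256-entry Tx ring, RTE_MIN(256 / 4, DEFAULT_TX_FREE_THRESH)
 * yields 32; a small 64-entry ring would get 16, so the free threshold
 * scales down with the ring size but is capped at 32.
 */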
945 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
946 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
947 "number of TX entries minus 3 (%u)."
948 " (tx_free_thresh=%u port=%u queue=%u)\n",
950 tx_free_thresh, dev->data->port_id, queue_idx);
954 vq->vq_free_thresh = tx_free_thresh;
956 dev->data->tx_queues[queue_idx] = txvq;
961 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
964 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
965 struct virtio_hw *hw = dev->data->dev_private;
966 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
968 PMD_INIT_FUNC_TRACE();
970 if (!vtpci_packed_queue(hw)) {
971 if (hw->use_inorder_tx)
972 vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
981 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
985 * Requeue the discarded mbuf. This should always be
986 * successful since it was just dequeued.
988 if (vtpci_packed_queue(vq->hw))
989 error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
991 error = virtqueue_enqueue_recv_refill(vq, &m, 1);
993 if (unlikely(error)) {
994 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1000 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
1004 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
1005 if (unlikely(error)) {
1006 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
1007 rte_pktmbuf_free(m);
1012 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
1014 uint32_t s = mbuf->pkt_len;
1015 struct ether_addr *ea;
1020 stats->size_bins[1]++;
1021 } else if (s > 64 && s < 1024) {
1024 /* count zeros, and offset into correct bin */
1025 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
1026 stats->size_bins[bin]++;
1029 stats->size_bins[0]++;
1031 stats->size_bins[6]++;
1033 stats->size_bins[7]++;
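/*
 * Sketch of the binning above (packet sizes are example values; the
 * surrounding range checks are assumed to use the usual 64/1519-byte
 * bounds): for sizes strictly between 64 and 1024, 32 - __builtin_clz(s)
 * is the position of the highest set bit plus one, so subtracting 5 maps
 * 65..127 to bin 2, 128..255 to bin 3, 256..511 to bin 4 and 512..1023
 * to bin 5, while exact 64-byte frames, runts, 1024..1518 frames and
 * larger frames land in bins 1, 0, 6 and 7 respectively.
 */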
1036 ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
1037 if (is_multicast_ether_addr(ea)) {
1038 if (is_broadcast_ether_addr(ea))
1046 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
1048 VIRTIO_DUMP_PACKET(m, m->data_len);
1050 virtio_update_packet_stats(&rxvq->stats, m);
1053 /* Optionally fill offload information in structure */
1055 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
1057 struct rte_net_hdr_lens hdr_lens;
1058 uint32_t hdrlen, ptype;
1059 int l4_supported = 0;
1062 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1065 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
1067 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
1068 m->packet_type = ptype;
1069 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
1070 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
1071 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
1074 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1075 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
1076 if (hdr->csum_start <= hdrlen && l4_supported) {
1077 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
1079 /* Unknown proto or tunnel, do sw cksum. We can assume
1080 * the cksum field is in the first segment since the
1081 * buffers we provided to the host are large enough.
1082 * In case of SCTP, this will be wrong since it's a CRC
1083 * but there's nothing we can do.
1085 uint16_t csum = 0, off;
1087 rte_raw_cksum_mbuf(m, hdr->csum_start,
1088 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
1090 if (likely(csum != 0xffff))
1092 off = hdr->csum_offset + hdr->csum_start;
1093 if (rte_pktmbuf_data_len(m) >= off + 1)
1094 *rte_pktmbuf_mtod_offset(m, uint16_t *,
1097 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
1098 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1101 /* GSO request, save required information in mbuf */
1102 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1103 /* Check unsupported modes */
1104 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
1105 (hdr->gso_size == 0)) {
1109 /* Update mss lengths in mbuf */
1110 m->tso_segsz = hdr->gso_size;
1111 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1112 case VIRTIO_NET_HDR_GSO_TCPV4:
1113 case VIRTIO_NET_HDR_GSO_TCPV6:
1114 m->ol_flags |= PKT_RX_LRO | \
1115 PKT_RX_L4_CKSUM_NONE;
1125 #define VIRTIO_MBUF_BURST_SZ 64
1126 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
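/*
 * Illustration of the macro above (the cache-line size is platform
 * dependent): with a 64-byte cache line and 16-byte split-ring
 * descriptors, DESC_PER_CACHELINE is 4; the receive paths below trim the
 * burst so that vq_used_cons_idx + num ends on a multiple of 4, keeping a
 * burst from stopping in the middle of a descriptor cache line.
 */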
1128 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1130 struct virtnet_rx *rxvq = rx_queue;
1131 struct virtqueue *vq = rxvq->vq;
1132 struct virtio_hw *hw = vq->hw;
1133 struct rte_mbuf *rxm, *new_mbuf;
1134 uint16_t nb_used, num, nb_rx;
1135 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1136 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1138 uint32_t i, nb_enqueued;
1140 struct virtio_net_hdr *hdr;
1143 if (unlikely(hw->started == 0))
1146 nb_used = VIRTQUEUE_NUSED(vq);
1148 virtio_rmb(hw->weak_barriers);
1150 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1151 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1152 num = VIRTIO_MBUF_BURST_SZ;
1153 if (likely(num > DESC_PER_CACHELINE))
1154 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1156 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1157 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1160 hdr_size = hw->vtnet_hdr_size;
1162 for (i = 0; i < num ; i++) {
1165 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1167 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1168 PMD_RX_LOG(ERR, "Packet drop");
1170 virtio_discard_rxbuf(vq, rxm);
1171 rxvq->stats.errors++;
1175 rxm->port = rxvq->port_id;
1176 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1180 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1181 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1183 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1184 RTE_PKTMBUF_HEADROOM - hdr_size);
1187 rte_vlan_strip(rxm);
1189 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1190 virtio_discard_rxbuf(vq, rxm);
1191 rxvq->stats.errors++;
1195 virtio_rx_stats_updated(rxvq, rxm);
1197 rx_pkts[nb_rx++] = rxm;
1200 rxvq->stats.packets += nb_rx;
1202 /* Allocate new mbuf for the used descriptor */
1203 while (likely(!virtqueue_full(vq))) {
1204 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1205 if (unlikely(new_mbuf == NULL)) {
1206 struct rte_eth_dev *dev
1207 = &rte_eth_devices[rxvq->port_id];
1208 dev->data->rx_mbuf_alloc_failed++;
1211 error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
1212 if (unlikely(error)) {
1213 rte_pktmbuf_free(new_mbuf);
1219 if (likely(nb_enqueued)) {
1220 vq_update_avail_idx(vq);
1222 if (unlikely(virtqueue_kick_prepare(vq))) {
1223 virtqueue_notify(vq);
1224 PMD_RX_LOG(DEBUG, "Notified");
1232 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1235 struct virtnet_rx *rxvq = rx_queue;
1236 struct virtqueue *vq = rxvq->vq;
1237 struct virtio_hw *hw = vq->hw;
1238 struct rte_mbuf *rxm, *new_mbuf;
1239 uint16_t num, nb_rx;
1240 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1241 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1243 uint32_t i, nb_enqueued;
1245 struct virtio_net_hdr *hdr;
1248 if (unlikely(hw->started == 0))
1251 num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1252 if (likely(num > DESC_PER_CACHELINE))
1253 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1255 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1256 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1259 hdr_size = hw->vtnet_hdr_size;
1261 for (i = 0; i < num; i++) {
1264 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1266 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1267 PMD_RX_LOG(ERR, "Packet drop");
1269 virtio_discard_rxbuf(vq, rxm);
1270 rxvq->stats.errors++;
1274 rxm->port = rxvq->port_id;
1275 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1279 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1280 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1282 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1283 RTE_PKTMBUF_HEADROOM - hdr_size);
1286 rte_vlan_strip(rxm);
1288 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1289 virtio_discard_rxbuf(vq, rxm);
1290 rxvq->stats.errors++;
1294 virtio_rx_stats_updated(rxvq, rxm);
1296 rx_pkts[nb_rx++] = rxm;
1299 rxvq->stats.packets += nb_rx;
1301 /* Allocate new mbuf for the used descriptor */
1302 while (likely(!virtqueue_full(vq))) {
1303 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1304 if (unlikely(new_mbuf == NULL)) {
1305 struct rte_eth_dev *dev =
1306 &rte_eth_devices[rxvq->port_id];
1307 dev->data->rx_mbuf_alloc_failed++;
1310 error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
1311 if (unlikely(error)) {
1312 rte_pktmbuf_free(new_mbuf);
1318 if (likely(nb_enqueued)) {
1319 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1320 virtqueue_notify(vq);
1321 PMD_RX_LOG(DEBUG, "Notified");
1330 virtio_recv_pkts_inorder(void *rx_queue,
1331 struct rte_mbuf **rx_pkts,
1334 struct virtnet_rx *rxvq = rx_queue;
1335 struct virtqueue *vq = rxvq->vq;
1336 struct virtio_hw *hw = vq->hw;
1337 struct rte_mbuf *rxm;
1338 struct rte_mbuf *prev;
1339 uint16_t nb_used, num, nb_rx;
1340 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1341 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1343 uint32_t nb_enqueued;
1350 if (unlikely(hw->started == 0))
1353 nb_used = VIRTQUEUE_NUSED(vq);
1354 nb_used = RTE_MIN(nb_used, nb_pkts);
1355 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1357 virtio_rmb(hw->weak_barriers);
1359 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1364 hdr_size = hw->vtnet_hdr_size;
1366 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1368 for (i = 0; i < num; i++) {
1369 struct virtio_net_hdr_mrg_rxbuf *header;
1371 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1372 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1376 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1377 PMD_RX_LOG(ERR, "Packet drop");
1379 virtio_discard_rxbuf_inorder(vq, rxm);
1380 rxvq->stats.errors++;
1384 header = (struct virtio_net_hdr_mrg_rxbuf *)
1385 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1388 if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1389 seg_num = header->num_buffers;
1396 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1397 rxm->nb_segs = seg_num;
1400 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1401 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1403 rxm->port = rxvq->port_id;
1405 rx_pkts[nb_rx] = rxm;
1408 if (vq->hw->has_rx_offload &&
1409 virtio_rx_offload(rxm, &header->hdr) < 0) {
1410 virtio_discard_rxbuf_inorder(vq, rxm);
1411 rxvq->stats.errors++;
1416 rte_vlan_strip(rx_pkts[nb_rx]);
1418 seg_res = seg_num - 1;
1420 /* Merge remaining segments */
1421 while (seg_res != 0 && i < (num - 1)) {
1425 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1426 rxm->pkt_len = (uint32_t)(len[i]);
1427 rxm->data_len = (uint16_t)(len[i]);
1429 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1430 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1440 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1445 /* Last packet still needs its remaining segments merged */
1446 while (seg_res != 0) {
1447 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1448 VIRTIO_MBUF_BURST_SZ);
1450 prev = rcv_pkts[nb_rx];
1451 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1452 virtio_rmb(hw->weak_barriers);
1453 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1455 uint16_t extra_idx = 0;
1458 while (extra_idx < rcv_cnt) {
1459 rxm = rcv_pkts[extra_idx];
1461 RTE_PKTMBUF_HEADROOM - hdr_size;
1462 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1463 rxm->data_len = (uint16_t)(len[extra_idx]);
1466 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1467 rx_pkts[nb_rx]->data_len += len[extra_idx];
1473 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1478 "No enough segments for packet.");
1479 virtio_discard_rxbuf_inorder(vq, prev);
1480 rxvq->stats.errors++;
1485 rxvq->stats.packets += nb_rx;
1487 /* Allocate new mbuf for the used descriptor */
1489 if (likely(!virtqueue_full(vq))) {
1490 /* free_cnt may include mrg descs */
1491 uint16_t free_cnt = vq->vq_free_cnt;
1492 struct rte_mbuf *new_pkts[free_cnt];
1494 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1495 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1497 if (unlikely(error)) {
1498 for (i = 0; i < free_cnt; i++)
1499 rte_pktmbuf_free(new_pkts[i]);
1501 nb_enqueued += free_cnt;
1503 struct rte_eth_dev *dev =
1504 &rte_eth_devices[rxvq->port_id];
1505 dev->data->rx_mbuf_alloc_failed += free_cnt;
1509 if (likely(nb_enqueued)) {
1510 vq_update_avail_idx(vq);
1512 if (unlikely(virtqueue_kick_prepare(vq))) {
1513 virtqueue_notify(vq);
1514 PMD_RX_LOG(DEBUG, "Notified");
1522 virtio_recv_mergeable_pkts(void *rx_queue,
1523 struct rte_mbuf **rx_pkts,
1526 struct virtnet_rx *rxvq = rx_queue;
1527 struct virtqueue *vq = rxvq->vq;
1528 struct virtio_hw *hw = vq->hw;
1529 struct rte_mbuf *rxm;
1530 struct rte_mbuf *prev;
1531 uint16_t nb_used, num, nb_rx = 0;
1532 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1533 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1535 uint32_t nb_enqueued = 0;
1536 uint32_t seg_num = 0;
1537 uint32_t seg_res = 0;
1538 uint32_t hdr_size = hw->vtnet_hdr_size;
1541 if (unlikely(hw->started == 0))
1544 nb_used = VIRTQUEUE_NUSED(vq);
1546 virtio_rmb(hw->weak_barriers);
1548 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1550 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1551 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1552 num = VIRTIO_MBUF_BURST_SZ;
1553 if (likely(num > DESC_PER_CACHELINE))
1554 num = num - ((vq->vq_used_cons_idx + num) %
1555 DESC_PER_CACHELINE);
1558 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1560 for (i = 0; i < num; i++) {
1561 struct virtio_net_hdr_mrg_rxbuf *header;
1563 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1564 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1568 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1569 PMD_RX_LOG(ERR, "Packet drop");
1571 virtio_discard_rxbuf(vq, rxm);
1572 rxvq->stats.errors++;
1576 header = (struct virtio_net_hdr_mrg_rxbuf *)
1577 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1579 seg_num = header->num_buffers;
1583 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1584 rxm->nb_segs = seg_num;
1587 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1588 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1590 rxm->port = rxvq->port_id;
1592 rx_pkts[nb_rx] = rxm;
1595 if (hw->has_rx_offload &&
1596 virtio_rx_offload(rxm, &header->hdr) < 0) {
1597 virtio_discard_rxbuf(vq, rxm);
1598 rxvq->stats.errors++;
1603 rte_vlan_strip(rx_pkts[nb_rx]);
1605 seg_res = seg_num - 1;
1607 /* Merge remaining segments */
1608 while (seg_res != 0 && i < (num - 1)) {
1612 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1613 rxm->pkt_len = (uint32_t)(len[i]);
1614 rxm->data_len = (uint16_t)(len[i]);
1616 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1617 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1627 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1632 /* Last packet still needs its remaining segments merged */
1633 while (seg_res != 0) {
1634 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1635 VIRTIO_MBUF_BURST_SZ);
1637 prev = rcv_pkts[nb_rx];
1638 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1639 virtio_rmb(hw->weak_barriers);
1640 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1642 uint16_t extra_idx = 0;
1645 while (extra_idx < rcv_cnt) {
1646 rxm = rcv_pkts[extra_idx];
1648 RTE_PKTMBUF_HEADROOM - hdr_size;
1649 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1650 rxm->data_len = (uint16_t)(len[extra_idx]);
1653 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1654 rx_pkts[nb_rx]->data_len += len[extra_idx];
1660 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1665 "No enough segments for packet.");
1666 virtio_discard_rxbuf(vq, prev);
1667 rxvq->stats.errors++;
1672 rxvq->stats.packets += nb_rx;
1674 /* Allocate new mbuf for the used descriptor */
1675 if (likely(!virtqueue_full(vq))) {
1676 /* free_cnt may include mrg descs */
1677 uint16_t free_cnt = vq->vq_free_cnt;
1678 struct rte_mbuf *new_pkts[free_cnt];
1680 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1681 error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1683 if (unlikely(error)) {
1684 for (i = 0; i < free_cnt; i++)
1685 rte_pktmbuf_free(new_pkts[i]);
1687 nb_enqueued += free_cnt;
1689 struct rte_eth_dev *dev =
1690 &rte_eth_devices[rxvq->port_id];
1691 dev->data->rx_mbuf_alloc_failed += free_cnt;
1695 if (likely(nb_enqueued)) {
1696 vq_update_avail_idx(vq);
1698 if (unlikely(virtqueue_kick_prepare(vq))) {
1699 virtqueue_notify(vq);
1700 PMD_RX_LOG(DEBUG, "Notified");
1708 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1709 struct rte_mbuf **rx_pkts,
1712 struct virtnet_rx *rxvq = rx_queue;
1713 struct virtqueue *vq = rxvq->vq;
1714 struct virtio_hw *hw = vq->hw;
1715 struct rte_mbuf *rxm;
1716 struct rte_mbuf *prev = NULL;
1717 uint16_t num, nb_rx = 0;
1718 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1719 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1720 uint32_t nb_enqueued = 0;
1721 uint32_t seg_num = 0;
1722 uint32_t seg_res = 0;
1723 uint32_t hdr_size = hw->vtnet_hdr_size;
1727 if (unlikely(hw->started == 0))
1732 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1733 num = VIRTIO_MBUF_BURST_SZ;
1734 if (likely(num > DESC_PER_CACHELINE))
1735 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1737 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1739 for (i = 0; i < num; i++) {
1740 struct virtio_net_hdr_mrg_rxbuf *header;
1742 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1743 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1747 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1748 PMD_RX_LOG(ERR, "Packet drop");
1750 virtio_discard_rxbuf(vq, rxm);
1751 rxvq->stats.errors++;
1755 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1756 rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1757 seg_num = header->num_buffers;
1762 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1763 rxm->nb_segs = seg_num;
1766 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1767 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1769 rxm->port = rxvq->port_id;
1770 rx_pkts[nb_rx] = rxm;
1773 if (hw->has_rx_offload &&
1774 virtio_rx_offload(rxm, &header->hdr) < 0) {
1775 virtio_discard_rxbuf(vq, rxm);
1776 rxvq->stats.errors++;
1781 rte_vlan_strip(rx_pkts[nb_rx]);
1783 seg_res = seg_num - 1;
1785 /* Merge remaining segments */
1786 while (seg_res != 0 && i < (num - 1)) {
1790 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1791 rxm->pkt_len = (uint32_t)(len[i]);
1792 rxm->data_len = (uint16_t)(len[i]);
1794 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1795 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1805 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1810 /* Last packet still needs its remaining segments merged */
1811 while (seg_res != 0) {
1812 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1813 VIRTIO_MBUF_BURST_SZ);
1814 if (likely(vq->vq_free_cnt >= rcv_cnt)) {
1815 num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1817 uint16_t extra_idx = 0;
1821 while (extra_idx < rcv_cnt) {
1822 rxm = rcv_pkts[extra_idx];
1825 RTE_PKTMBUF_HEADROOM - hdr_size;
1826 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1827 rxm->data_len = (uint16_t)(len[extra_idx]);
1831 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1832 rx_pkts[nb_rx]->data_len += len[extra_idx];
1837 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1842 "No enough segments for packet.");
1844 virtio_discard_rxbuf(vq, prev);
1845 rxvq->stats.errors++;
1850 rxvq->stats.packets += nb_rx;
1852 /* Allocate new mbuf for the used descriptor */
1853 if (likely(!virtqueue_full(vq))) {
1854 /* free_cnt may include mrg descs */
1855 uint16_t free_cnt = vq->vq_free_cnt;
1856 struct rte_mbuf *new_pkts[free_cnt];
1858 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1859 error = virtqueue_enqueue_recv_refill_packed(vq,
1860 new_pkts, free_cnt);
1861 if (unlikely(error)) {
1862 for (i = 0; i < free_cnt; i++)
1863 rte_pktmbuf_free(new_pkts[i]);
1865 nb_enqueued += free_cnt;
1867 struct rte_eth_dev *dev =
1868 &rte_eth_devices[rxvq->port_id];
1869 dev->data->rx_mbuf_alloc_failed += free_cnt;
1873 if (likely(nb_enqueued)) {
1874 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1875 virtqueue_notify(vq);
1876 PMD_RX_LOG(DEBUG, "Notified");
1884 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1887 struct virtnet_tx *txvq = tx_queue;
1888 struct virtqueue *vq = txvq->vq;
1889 struct virtio_hw *hw = vq->hw;
1890 uint16_t hdr_size = hw->vtnet_hdr_size;
1894 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1897 if (unlikely(nb_pkts < 1))
1900 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1902 if (nb_pkts > vq->vq_free_cnt)
1903 virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
1905 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1906 struct rte_mbuf *txm = tx_pkts[nb_tx];
1907 int can_push = 0, slots, need;
1909 /* Do VLAN tag insertion */
1910 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1911 error = rte_vlan_insert(&txm);
1912 if (unlikely(error)) {
1913 rte_pktmbuf_free(txm);
1918 /* optimize ring usage */
1919 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1920 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1921 rte_mbuf_refcnt_read(txm) == 1 &&
1922 RTE_MBUF_DIRECT(txm) &&
1923 txm->nb_segs == 1 &&
1924 rte_pktmbuf_headroom(txm) >= hdr_size &&
1925 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1926 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1929 /* How many main ring entries are needed for this Tx?
1930 * any_layout => number of segments
1931 * default => number of segments + 1
1932 */
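/*
 * For example (segment count is illustrative): a 3-segment mbuf whose
 * header cannot be pushed into the first segment needs 3 + 1 = 4 slots,
 * while a single-segment mbuf with can_push set needs only 1.
 */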
1933 slots = txm->nb_segs + !can_push;
1934 need = slots - vq->vq_free_cnt;
1936 /* A positive value means more free vring descriptors are needed */
1937 if (unlikely(need > 0)) {
1938 virtio_xmit_cleanup_packed(vq, need);
1939 need = slots - vq->vq_free_cnt;
1940 if (unlikely(need > 0)) {
1942 "No free tx descriptors to transmit");
1947 /* Enqueue Packet buffers */
1948 virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
1950 virtio_update_packet_stats(&txvq->stats, txm);
1953 txvq->stats.packets += nb_tx;
1955 if (likely(nb_tx)) {
1956 if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1957 virtqueue_notify(vq);
1958 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1966 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1968 struct virtnet_tx *txvq = tx_queue;
1969 struct virtqueue *vq = txvq->vq;
1970 struct virtio_hw *hw = vq->hw;
1971 uint16_t hdr_size = hw->vtnet_hdr_size;
1972 uint16_t nb_used, nb_tx = 0;
1975 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1978 if (unlikely(nb_pkts < 1))
1981 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1982 nb_used = VIRTQUEUE_NUSED(vq);
1984 virtio_rmb(hw->weak_barriers);
1985 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1986 virtio_xmit_cleanup(vq, nb_used);
1988 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1989 struct rte_mbuf *txm = tx_pkts[nb_tx];
1990 int can_push = 0, use_indirect = 0, slots, need;
1992 /* Do VLAN tag insertion */
1993 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1994 error = rte_vlan_insert(&txm);
1995 if (unlikely(error)) {
1996 rte_pktmbuf_free(txm);
2001 /* optimize ring usage */
2002 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2003 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2004 rte_mbuf_refcnt_read(txm) == 1 &&
2005 RTE_MBUF_DIRECT(txm) &&
2006 txm->nb_segs == 1 &&
2007 rte_pktmbuf_headroom(txm) >= hdr_size &&
2008 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2009 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
2011 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
2012 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
2015 /* How many main ring entries are needed for this Tx?
2016 * any_layout => number of segments
2017 * indirect => 1
2018 * default => number of segments + 1
2019 */
2020 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
2021 need = slots - vq->vq_free_cnt;
2023 /* A positive value means more free vring descriptors are needed */
2024 if (unlikely(need > 0)) {
2025 nb_used = VIRTQUEUE_NUSED(vq);
2026 virtio_rmb(hw->weak_barriers);
2027 need = RTE_MIN(need, (int)nb_used);
2029 virtio_xmit_cleanup(vq, need);
2030 need = slots - vq->vq_free_cnt;
2031 if (unlikely(need > 0)) {
2033 "No free tx descriptors to transmit");
2038 /* Enqueue Packet buffers */
2039 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
2042 virtio_update_packet_stats(&txvq->stats, txm);
2045 txvq->stats.packets += nb_tx;
2047 if (likely(nb_tx)) {
2048 vq_update_avail_idx(vq);
2050 if (unlikely(virtqueue_kick_prepare(vq))) {
2051 virtqueue_notify(vq);
2052 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2060 virtio_xmit_pkts_inorder(void *tx_queue,
2061 struct rte_mbuf **tx_pkts,
2064 struct virtnet_tx *txvq = tx_queue;
2065 struct virtqueue *vq = txvq->vq;
2066 struct virtio_hw *hw = vq->hw;
2067 uint16_t hdr_size = hw->vtnet_hdr_size;
2068 uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
2069 struct rte_mbuf *inorder_pkts[nb_pkts];
2072 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
2075 if (unlikely(nb_pkts < 1))
2079 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
2080 nb_used = VIRTQUEUE_NUSED(vq);
2082 virtio_rmb(hw->weak_barriers);
2083 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
2084 virtio_xmit_cleanup_inorder(vq, nb_used);
2086 if (unlikely(!vq->vq_free_cnt))
2087 virtio_xmit_cleanup_inorder(vq, nb_used);
2089 nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
2091 for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
2092 struct rte_mbuf *txm = tx_pkts[nb_tx];
2095 /* Do VLAN tag insertion */
2096 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
2097 error = rte_vlan_insert(&txm);
2098 if (unlikely(error)) {
2099 rte_pktmbuf_free(txm);
2104 /* optimize ring usage */
2105 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
2106 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
2107 rte_mbuf_refcnt_read(txm) == 1 &&
2108 RTE_MBUF_DIRECT(txm) &&
2109 txm->nb_segs == 1 &&
2110 rte_pktmbuf_headroom(txm) >= hdr_size &&
2111 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
2112 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
2113 inorder_pkts[nb_inorder_pkts] = txm;
2116 virtio_update_packet_stats(&txvq->stats, txm);
2120 if (nb_inorder_pkts) {
2121 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2123 nb_inorder_pkts = 0;
2126 slots = txm->nb_segs + 1;
2127 need = slots - vq->vq_free_cnt;
2128 if (unlikely(need > 0)) {
2129 nb_used = VIRTQUEUE_NUSED(vq);
2130 virtio_rmb(hw->weak_barriers);
2131 need = RTE_MIN(need, (int)nb_used);
2133 virtio_xmit_cleanup_inorder(vq, need);
2135 need = slots - vq->vq_free_cnt;
2137 if (unlikely(need > 0)) {
2139 "No free tx descriptors to transmit");
2143 /* Enqueue Packet buffers */
2144 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2146 virtio_update_packet_stats(&txvq->stats, txm);
2149 /* Transmit all inorder packets */
2150 if (nb_inorder_pkts)
2151 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2154 txvq->stats.packets += nb_tx;
2156 if (likely(nb_tx)) {
2157 vq_update_avail_idx(vq);
2159 if (unlikely(virtqueue_kick_prepare(vq))) {
2160 virtqueue_notify(vq);
2161 PMD_TX_LOG(DEBUG, "Notified backend after xmit");