/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_cpuflags.h>
#include <rte_net.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
59 #include "virtio_logs.h"
60 #include "virtio_ethdev.h"
61 #include "virtio_pci.h"
62 #include "virtqueue.h"
63 #include "virtio_rxtx.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif

#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
	ETH_TXQ_FLAGS_NOOFFLOADS)
void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp, *dp_tail;
	struct vq_desc_extra *dxp;
	uint16_t desc_idx_last = desc_idx;

	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			desc_idx_last = dp->next;
			dp = &vq->vq_ring.desc[dp->next];
		}
	}
	dxp->ndescs = 0;

	/*
	 * We must append the existing free chain, if any, to the end of
	 * newly freed chain. If the virtqueue was completely used, then
	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
	 */
	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
		vq->vq_desc_head_idx = desc_idx;
	} else {
		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
		dp_tail->next = desc_idx;
	}

	vq->vq_desc_tail_idx = desc_idx_last;
	dp->next = VQ_RING_DESC_CHAIN_END;
}
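
/*
 * Illustrative note (not from the original source): suppose a 4-entry ring
 * where descriptor 3 is the only entry on the free list and the chain
 * 0 -> 1 has just been returned by the host.  vq_ring_free_chain(vq, 0)
 * walks 0 -> 1, splices the freed chain onto the existing free list
 * (desc[3].next = 0), sets vq_desc_tail_idx = 1 and terminates it with
 * desc[1].next = VQ_RING_DESC_CHAIN_END, so the free list becomes
 * 3 -> 0 -> 1 and vq_free_cnt grows by the ndescs of the freed chain.
 */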
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
			   uint32_t *len, uint16_t num)
{
	struct vring_used_elem *uep;
	struct rte_mbuf *cookie;
	uint16_t used_idx, desc_idx;
	uint16_t i;

	/* Caller does the check */
	for (i = 0; i < num; i++) {
		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		desc_idx = (uint16_t)uep->id;
		len[i] = uep->len;
		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;

		if (unlikely(cookie == NULL)) {
			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
			break;
		}

		rte_prefetch0(cookie);
		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
		rx_pkts[i] = cookie;
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);
		vq->vq_descx[desc_idx].cookie = NULL;
	}

	return i;
}
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif

/* Cleanup from completed transmits. */
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
	uint16_t i, used_idx, desc_idx;

	for (i = 0; i < num; i++) {
		struct vring_used_elem *uep;
		struct vq_desc_extra *dxp;

		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];

		desc_idx = (uint16_t)uep->id;
		dxp = &vq->vq_descx[desc_idx];
		vq->vq_used_cons_idx++;
		vq_ring_free_chain(vq, desc_idx);

		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}
}
static int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
{
	struct vq_desc_extra *dxp;
	struct virtio_hw *hw = vq->hw;
	struct vring_desc *start_dp;
	uint16_t needed = 1;
	uint16_t head_idx, idx;

	if (unlikely(vq->vq_free_cnt == 0))
		return -ENOSPC;
	if (unlikely(vq->vq_free_cnt < needed))
		return -EMSGSIZE;

	head_idx = vq->vq_desc_head_idx;
	if (unlikely(head_idx >= vq->vq_nentries))
		return -EFAULT;

	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;
	start_dp[idx].addr =
		VIRTIO_MBUF_ADDR(cookie, vq) +
		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
	start_dp[idx].len =
		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
	start_dp[idx].flags = VRING_DESC_F_WRITE;
	idx = start_dp[idx].next;
	vq->vq_desc_head_idx = idx;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq_update_avail_ring(vq, head_idx);

	return 0;
}
/* When doing TSO, the IP length is not included in the pseudo header
 * checksum of the packet given to the PMD, but for virtio it is
 * expected.
 */
static void
virtio_tso_fix_cksum(struct rte_mbuf *m)
{
	/* common case: header is not fragmented */
	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
			m->l4_len)) {
		struct ipv4_hdr *iph;
		struct ipv6_hdr *ip6h;
		struct tcp_hdr *th;
		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
		uint32_t tmp;

		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
		th = RTE_PTR_ADD(iph, m->l3_len);
		if ((iph->version_ihl >> 4) == 4) {
			iph->hdr_checksum = 0;
			iph->hdr_checksum = rte_ipv4_cksum(iph);
			ip_len = iph->total_length;
			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
				m->l3_len);
		} else {
			ip6h = (struct ipv6_hdr *)iph;
			ip_paylen = ip6h->payload_len;
		}

		/* calculate the new phdr checksum not including ip_paylen */
		prev_cksum = th->cksum;
		tmp = prev_cksum;
		tmp += ip_paylen;
		tmp = (tmp & 0xffff) + (tmp >> 16);
		new_cksum = tmp;

		/* replace it in the packet */
		th->cksum = new_cksum;
	}
}
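
/*
 * Worked example for virtio_tso_fix_cksum() (illustrative values, not from
 * the original source): if the stack stored a pseudo-header checksum of
 * 0xfff0 and the big-endian IP payload length is 0x0020, then
 *
 *   tmp = 0xfff0 + 0x0020 = 0x10010
 *   tmp = (tmp & 0xffff) + (tmp >> 16) = 0x0010 + 0x1 = 0x0011
 *
 * and 0x0011 is written back to th->cksum, i.e. the payload length is
 * folded into the pseudo-header checksum that the virtio host expects
 * before it finishes the checksum over the TCP payload.
 */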
static int
tx_offload_enabled(struct virtio_hw *hw)
{
	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
}
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
		       uint16_t needed, int use_indirect, int can_push)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	int offload;

	offload = tx_offload_enabled(vq->hw);
	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookie, head_size);
		/* if offload disabled, it is not zeroed below, do it now */
		if (offload == 0)
			memset(hdr, 0, head_size);
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}
	/* Checksum Offload / TSO */
	if (offload) {
		if (cookie->ol_flags & PKT_TX_TCP_SEG)
			cookie->ol_flags |= PKT_TX_TCP_CKSUM;

		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct udp_hdr,
				dgram_cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;
		case PKT_TX_TCP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;
		default:
			hdr->csum_start = 0;
			hdr->csum_offset = 0;
			hdr->flags = 0;
			break;
		}

		/* TCP Segmentation Offload */
		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
			virtio_tso_fix_cksum(cookie);
			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
				VIRTIO_NET_HDR_GSO_TCPV6 :
				VIRTIO_NET_HDR_GSO_TCPV4;
			hdr->gso_size = cookie->tso_segsz;
			hdr->hdr_len = cookie->l2_len + cookie->l3_len +
				cookie->l4_len;
		} else {
			hdr->gso_type = 0;
			hdr->gso_size = 0;
			hdr->hdr_len = 0;
		}
	}

	do {
		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len = cookie->data_len;
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	if (use_indirect)
		idx = vq->vq_ring.desc[head_idx].next;

	vq->vq_desc_head_idx = idx;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq_update_avail_ring(vq, head_idx);
}
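
/*
 * Illustrative note (not from the original source): for a TCP packet with a
 * 14-byte Ethernet header and a 20-byte IPv4 header, the checksum request
 * built above becomes
 *
 *   hdr->csum_start  = l2_len + l3_len = 14 + 20 = 34
 *   hdr->csum_offset = offsetof(struct tcp_hdr, cksum) = 16
 *
 * i.e. the host checksums the frame from byte 34 onwards and stores the
 * result at byte 34 + 16 = 50.
 */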
static void
virtio_dev_vring_start(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);

	/*
	 * Disable device(host) interrupting guest
	 */
	virtqueue_disable_intr(vq);
}
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (hw->cvq && hw->cvq->vq) {
		virtio_dev_vring_start(hw->cvq->vq);
		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
	}
}
void
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
	/*
	 * Start receive and transmit vrings
	 * - Setup vring structure for all queues
	 * - Initialize descriptor for the rx vring
	 * - Allocate blank mbufs for the each rx descriptor
	 */
	uint16_t i;
	uint16_t desc_idx;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Start rx vring. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		struct virtqueue *vq = rxvq->vq;
		struct rte_mbuf *m;
		int error, nbufs = 0;

		virtio_dev_vring_start(vq);
		if (rxvq->mpool == NULL) {
			rte_exit(EXIT_FAILURE,
				"Cannot allocate mbufs for rx virtqueue");
		}

		/* Allocate blank mbufs for the each rx descriptor */
		if (hw->use_simple_rxtx) {
			for (desc_idx = 0; desc_idx < vq->vq_nentries;
			     desc_idx++) {
				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
				vq->vq_ring.desc[desc_idx].flags =
					VRING_DESC_F_WRITE;
			}
		}

		memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
		     desc_idx++) {
			vq->sw_ring[vq->vq_nentries + desc_idx] =
				&rxvq->fake_mbuf;
		}

		while (!virtqueue_full(vq)) {
			m = rte_mbuf_raw_alloc(rxvq->mpool);
			if (m == NULL)
				break;

			/******************************************
			*         Enqueue allocated buffers        *
			*******************************************/
			if (hw->use_simple_rxtx)
				error = virtqueue_enqueue_recv_refill_simple(vq, m);
			else
				error = virtqueue_enqueue_recv_refill(vq, m);
			if (error) {
				rte_pktmbuf_free(m);
				break;
			}
			nbufs++;
		}

		vq_update_avail_idx(vq);
		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
	}
	/* Start tx vring. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];
		struct virtqueue *vq = txvq->vq;

		virtio_dev_vring_start(vq);
		if (hw->use_simple_rxtx) {
			uint16_t mid_idx = vq->vq_nentries >> 1;

			for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
				vq->vq_ring.avail->ring[desc_idx] =
					desc_idx + mid_idx;
				vq->vq_ring.desc[desc_idx + mid_idx].next =
					desc_idx;
				vq->vq_ring.desc[desc_idx + mid_idx].addr =
					txvq->virtio_net_hdr_mem +
					offsetof(struct virtio_tx_region, tx_hdr);
				vq->vq_ring.desc[desc_idx + mid_idx].len =
					vq->hw->vtnet_hdr_size;
				vq->vq_ring.desc[desc_idx + mid_idx].flags =
					VRING_DESC_F_NEXT;
				vq->vq_ring.desc[desc_idx].flags = 0;
			}
			for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
			     desc_idx++)
				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
		}
	}
}
int
virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			__rte_unused const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
	struct virtnet_rx *rxvq;
	int ret;

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, (void **)&rxvq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "rvq initialization failed");
		return ret;
	}

	/* Use the given mempool for rx mbuf allocation */
	rxvq->mpool = mp;
	dev->data->rx_queues[queue_idx] = rxvq;

	virtio_rxq_vec_setup(rxvq);

	return 0;
}
void
virtio_dev_rx_queue_release(void *rxq)
{
	struct virtnet_rx *rxvq = rxq;
	struct virtqueue *vq;
	const struct rte_memzone *mz;

	if (rxvq == NULL)
		return;

	/* rxvq is freed when vq is freed, and mz must be freed after the
	 * del_queue, so save the mz pointer first.
	 */
	vq = rxvq->vq;
	mz = rxvq->mz;

	virtio_dev_queue_release(vq);
	rte_memzone_free(mz);
}
static void
virtio_update_rxtx_handler(struct rte_eth_dev *dev,
			   const struct rte_eth_txconf *tx_conf)
{
	uint8_t use_simple_rxtx = 0;
	struct virtio_hw *hw = dev->data->dev_private;

#if defined RTE_ARCH_X86
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
		use_simple_rxtx = 1;
#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
		use_simple_rxtx = 1;
#endif
	/* Use simple rx/tx func if single segment and no offloads */
	if (use_simple_rxtx &&
	    (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
	    !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		PMD_INIT_LOG(INFO, "Using simple rx/tx path");
		dev->tx_pkt_burst = virtio_xmit_pkts_simple;
		dev->rx_pkt_burst = virtio_recv_pkts_vec;
		hw->use_simple_rxtx = use_simple_rxtx;
	}
}
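
/*
 * Illustrative sketch (not part of the driver): a TX queue configuration
 * that satisfies the simple-path conditions checked above.  The descriptor
 * count, socket id and the example_ prefix are hypothetical; the guard
 * macro keeps the sketch out of the build.
 */
#ifdef VIRTIO_RXTX_USAGE_EXAMPLE
static int
example_setup_simple_txq(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct rte_eth_txconf tx_conf = {
		/* single-segment, no offloads => matches VIRTIO_SIMPLE_FLAGS */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
		.tx_free_thresh = 0,	/* let the PMD pick its default */
	};

	return virtio_dev_tx_queue_setup(dev, queue_idx, 256, 0 /* socket */,
					 &tx_conf);
}
#endif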
/*
 * struct rte_eth_dev *dev: Used to update dev
 * uint16_t nb_desc: Defaults to values read from config space
 * unsigned int socket_id: Used to allocate memzone
 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
 * uint16_t queue_idx: Just used as an index in dev txq list
 */
int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
			uint16_t queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtnet_tx *txvq;
	struct virtqueue *vq;
	uint16_t tx_free_thresh;
	int ret;

	PMD_INIT_FUNC_TRACE();

	virtio_update_rxtx_handler(dev, tx_conf);

	ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
			nb_desc, socket_id, (void **)&txvq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "tvq initialization failed");
		return ret;
	}
	vq = txvq->vq;

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh =
			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
			"number of TX entries minus 3 (%u)."
			" (tx_free_thresh=%u port=%u queue=%u)\n",
			vq->vq_nentries - 3,
			tx_free_thresh, dev->data->port_id, queue_idx);
		return -EINVAL;
	}

	vq->vq_free_thresh = tx_free_thresh;

	dev->data->tx_queues[queue_idx] = txvq;
	return 0;
}
void
virtio_dev_tx_queue_release(void *txq)
{
	struct virtnet_tx *txvq = txq;
	struct virtqueue *vq;
	const struct rte_memzone *mz;
	const struct rte_memzone *hdr_mz;

	if (txvq == NULL)
		return;

	/* txvq is freed when vq is freed, and mz and hdr_mz must be freed
	 * after the del_queue, so save the pointers first.
	 */
	vq = txvq->vq;
	mz = txvq->mz;
	hdr_mz = txvq->virtio_net_hdr_mz;

	virtio_dev_queue_release(vq);
	rte_memzone_free(mz);
	rte_memzone_free(hdr_mz);
}
static void
virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
{
	int error;

	/* Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = virtqueue_enqueue_recv_refill(vq, m);
	if (unlikely(error)) {
		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
		rte_pktmbuf_free(m);
	}
}
static void
virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
{
	uint32_t s = mbuf->pkt_len;
	struct ether_addr *ea;

	if (s == 64) {
		stats->size_bins[1]++;
	} else if (s > 64 && s < 1024) {
		uint32_t bin;

		/* count zeros, and offset into correct bin */
		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
		stats->size_bins[bin]++;
	} else {
		if (s < 64)
			stats->size_bins[0]++;
		else if (s < 1519)
			stats->size_bins[6]++;
		else if (s >= 1519)
			stats->size_bins[7]++;
	}

	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
	if (is_multicast_ether_addr(ea)) {
		if (is_broadcast_ether_addr(ea))
			stats->broadcast++;
		else
			stats->multicast++;
	}
}
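
/*
 * Worked example for the size-bin formula above (not from the original
 * source): for s in 65..127 the highest set bit is bit 6, so
 * __builtin_clz(s) == 25 and bin = 32 - 25 - 5 = 2; for s in 512..1023 the
 * highest bit is 9, giving bin = 32 - 22 - 5 = 5.  Bins 2..5 therefore
 * cover 65-127, 128-255, 256-511 and 512-1023 bytes, while bin 1 counts
 * exactly-64-byte frames and bins 0, 6 and 7 count <64, 1024-1518 and
 * >=1519 byte frames respectively.
 */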
/* Optionally fill offload information in structure */
static int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t hdrlen, ptype;
	int l4_supported = 0;

	/* nothing to do */
	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return 0;

	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;

	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
	m->packet_type = ptype;
	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
		l4_supported = 1;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
		if (hdr->csum_start <= hdrlen && l4_supported) {
			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
		} else {
			/* Unknown proto or tunnel, do sw cksum. We can assume
			 * the cksum field is in the first segment since the
			 * buffers we provided to the host are large enough.
			 * In case of SCTP, this will be wrong since it's a CRC
			 * but there's nothing we can do.
			 */
			uint16_t csum = 0, off;

			rte_raw_cksum_mbuf(m, hdr->csum_start,
				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
				&csum);
			if (likely(csum != 0xffff))
				csum = ~csum;
			off = hdr->csum_offset + hdr->csum_start;
			if (rte_pktmbuf_data_len(m) >= off + 1)
				*rte_pktmbuf_mtod_offset(m, uint16_t *,
					off) = csum;
		}
	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	}

	/* GSO request, save required information in mbuf */
	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		/* Check unsupported modes */
		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
		    (hdr->gso_size == 0))
			return -EINVAL;

		/* Update mss lengths in mbuf */
		m->tso_segsz = hdr->gso_size;
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			m->ol_flags |= PKT_RX_LRO |
				PKT_RX_L4_CKSUM_NONE;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
static inline int
rx_offload_enabled(struct virtio_hw *hw)
{
	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
}

#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	int error;
	uint32_t i, nb_enqueued;
	uint32_t hdr_size;
	int offload;
	struct virtio_net_hdr *hdr;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();

	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
	if (likely(num > DESC_PER_CACHELINE))
		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);

	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);

	hw = vq->hw;
	nb_rx = 0;
	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;
	offload = rx_offload_enabled(hw);

	for (i = 0; i < num; i++) {
		rxm = rcv_pkts[i];

		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);

		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		rxm->port = rxvq->port_id;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
		rxm->data_len = (uint16_t)(len[i] - hdr_size);

		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);

		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);

		rx_pkts[nb_rx++] = rxm;

		rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
		virtio_update_packet_stats(&rxvq->stats, rxm);
	}

	rxvq->stats.packets += nb_rx;
	/* Allocate new mbuf for the used descriptor */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}
uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct virtnet_rx *rxvq = rx_queue;
	struct virtqueue *vq = rxvq->vq;
	struct virtio_hw *hw;
	struct rte_mbuf *rxm, *new_mbuf;
	uint16_t nb_used, num, nb_rx;
	uint32_t len[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
	struct rte_mbuf *prev;
	int error;
	uint32_t i, nb_enqueued;
	uint32_t seg_num;
	uint16_t extra_idx;
	uint32_t seg_res;
	uint32_t hdr_size;
	int offload;

	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();

	PMD_RX_LOG(DEBUG, "used:%d", nb_used);

	hw = vq->hw;
	nb_rx = 0;
	i = 0;
	nb_enqueued = 0;
	hdr_size = hw->vtnet_hdr_size;
	offload = rx_offload_enabled(hw);

	while (i < nb_used) {
		struct virtio_net_hdr_mrg_rxbuf *header;

		if (nb_rx == nb_pkts)
			break;

		num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
		if (num != 1)
			continue;
		i++;

		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
		PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);

		rxm = rcv_pkts[0];

		if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
			PMD_RX_LOG(ERR, "Packet drop");
			nb_enqueued++;
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
			RTE_PKTMBUF_HEADROOM - hdr_size);
		seg_num = header->num_buffers;
		if (seg_num == 0)
			seg_num = 1;

		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->nb_segs = seg_num;
		rxm->next = NULL;
		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
		rxm->data_len = (uint16_t)(len[0] - hdr_size);

		rxm->port = rxvq->port_id;
		rx_pkts[nb_rx] = rxm;
		prev = rxm;

		if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
			virtio_discard_rxbuf(vq, rxm);
			rxvq->stats.errors++;
			continue;
		}

		seg_res = seg_num - 1;

		while (seg_res != 0) {
			/* Get extra segments for current uncompleted packet. */
			uint16_t rcv_cnt =
				RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
			if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
				uint32_t rx_num =
					virtqueue_dequeue_burst_rx(vq,
					rcv_pkts, len, rcv_cnt);
				i += rx_num;
				rcv_cnt = rx_num;
			} else {
				PMD_RX_LOG(ERR,
					"No enough segments for packet.");
				nb_enqueued++;
				virtio_discard_rxbuf(vq, rxm);
				rxvq->stats.errors++;
				break;
			}

			extra_idx = 0;
			while (extra_idx < rcv_cnt) {
				rxm = rcv_pkts[extra_idx];
				rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
				rxm->pkt_len = (uint32_t)(len[extra_idx]);
				rxm->data_len = (uint16_t)(len[extra_idx]);

				if (prev)
					prev->next = rxm;
				prev = rxm;
				rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
				extra_idx++;
			}
			seg_res -= rcv_cnt;
		}

		if (hw->vlan_strip)
			rte_vlan_strip(rx_pkts[nb_rx]);

		VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
			rx_pkts[nb_rx]->data_len);

		rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
		virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
		nb_rx++;
	}

	rxvq->stats.packets += nb_rx;
	/* Allocate new mbuf for the used descriptor */
	while (likely(!virtqueue_full(vq))) {
		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
		if (unlikely(new_mbuf == NULL)) {
			struct rte_eth_dev *dev
				= &rte_eth_devices[rxvq->port_id];
			dev->data->rx_mbuf_alloc_failed++;
			break;
		}
		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
		if (unlikely(error)) {
			rte_pktmbuf_free(new_mbuf);
			break;
		}
		nb_enqueued++;
	}

	if (likely(nb_enqueued)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_RX_LOG(DEBUG, "Notified");
		}
	}

	return nb_rx;
}
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct virtnet_tx *txvq = tx_queue;
	struct virtqueue *vq = txvq->vq;
	struct virtio_hw *hw = vq->hw;
	uint16_t hdr_size = hw->vtnet_hdr_size;
	uint16_t nb_used, nb_tx;
	int error;

	if (unlikely(nb_pkts < 1))
		return nb_pkts;

	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
	nb_used = VIRTQUEUE_NUSED(vq);

	virtio_rmb();
	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
		virtio_xmit_cleanup(vq, nb_used);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		int can_push = 0, use_indirect = 0, slots, need;

		/* Do VLAN tag insertion */
		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
			error = rte_vlan_insert(&txm);
			if (unlikely(error)) {
				rte_pktmbuf_free(txm);
				continue;
			}
		}

		/* optimize ring usage */
		if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
		    rte_mbuf_refcnt_read(txm) == 1 &&
		    RTE_MBUF_DIRECT(txm) &&
		    txm->nb_segs == 1 &&
		    rte_pktmbuf_headroom(txm) >= hdr_size &&
		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
			can_push = 1;
		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
			use_indirect = 1;

		/* How many main ring entries are needed for this Tx?
		 * any_layout => number of segments
		 * indirect   => 1
		 * default    => number of segments + 1
		 */
		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
		need = slots - vq->vq_free_cnt;

		/* A positive value indicates that used descriptors must be
		 * reclaimed first.
		 */
		if (unlikely(need > 0)) {
			nb_used = VIRTQUEUE_NUSED(vq);
			virtio_rmb();
			need = RTE_MIN(need, (int)nb_used);

			virtio_xmit_cleanup(vq, need);
			need = slots - vq->vq_free_cnt;
			if (unlikely(need > 0)) {
				PMD_TX_LOG(ERR,
					"No free tx descriptors to transmit");
				break;
			}
		}

		/* Enqueue Packet buffers */
		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);

		txvq->stats.bytes += txm->pkt_len;
		virtio_update_packet_stats(&txvq->stats, txm);
	}

	txvq->stats.packets += nb_tx;

	if (likely(nb_tx)) {
		vq_update_avail_idx(vq);

		if (unlikely(virtqueue_kick_prepare(vq))) {
			virtqueue_notify(vq);
			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
		}
	}

	return nb_tx;
}
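
/*
 * Illustrative usage sketch (not part of the driver): how an application
 * typically reaches the burst functions above through the ethdev API.  The
 * port/queue ids, burst size and the example_ prefix are hypothetical; the
 * guard macro keeps the sketch out of the build.
 */
#ifdef VIRTIO_RXTX_USAGE_EXAMPLE
static void
example_forward_burst(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx, i;

	/* rte_eth_rx_burst()/rte_eth_tx_burst() dispatch to the rx/tx
	 * handlers selected at queue setup time (virtio_recv_pkts(),
	 * virtio_recv_mergeable_pkts(), virtio_xmit_pkts() or the
	 * simple variants).
	 */
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_rx);

	/* mbufs the TX ring could not accept remain owned by the caller */
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}
#endif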