X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=854af399e8062f72d7f15c9e083fae5f452b9b5d;hb=e56447980ece8ea01717f41ff82cf8122635e7ec;hp=fc0d84bd38f580c982187009f78ad942ada9240d;hpb=96cb6711939e936258c0632bb4912bcc6b15e47c;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index fc0d84bd38..854af399e8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdint.h>
@@ -39,20 +10,20 @@
 
 #include <rte_cycles.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_branch_prediction.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_string_fns.h>
 #include <rte_errno.h>
 #include <rte_byteorder.h>
-#include <rte_cpuflags.h>
 #include <rte_net.h>
 #include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
 
 #include "virtio_logs.h"
 #include "virtio_ethdev.h"
@@ -70,7 +41,16 @@
 #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 	ETH_TXQ_FLAGS_NOOFFLOADS)
 
-static void
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+	struct virtnet_rx *rxvq = rxq;
+	struct virtqueue *vq = rxvq->vq;
+
+	return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
+void
 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 {
 	struct vring_desc *dp, *dp_tail;
@@ -122,7 +102,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
 
 		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
				vq->vq_used_cons_idx);
 			break;
 		}
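The virtio_dev_rx_queue_done() helper added above backs the ethdev rx_descriptor_done callback: it reports whether at least `offset` completed buffers sit in the used ring. A minimal standalone sketch of the index arithmetic it depends on, assuming VIRTQUEUE_NUSED is the usual free-running difference between the device's used index and the driver's consumer index:

#include <stdint.h>
#include <stdio.h>

/* same shape as VIRTQUEUE_NUSED(vq): both indices increment without
 * masking, so uint16_t subtraction stays correct across wrap-around */
static uint16_t
nused(uint16_t used_idx, uint16_t used_cons_idx)
{
	return (uint16_t)(used_idx - used_cons_idx);
}

int
main(void)
{
	/* device produced 3 entries past the consumer, across a wrap */
	printf("%u\n", nused(1, 65534));	/* prints 3 */
	/* rx_queue_done(offset) then reduces to: nused(...) >= offset */
	return 0;
}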
@@ -207,18 +187,76 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
 	return 0;
 }
 
+/* When doing TSO, the IP length is not included in the pseudo header
+ * checksum of the packet given to the PMD, but for virtio it is
+ * expected.
+ */
+static void
+virtio_tso_fix_cksum(struct rte_mbuf *m)
+{
+	/* common case: header is not fragmented */
+	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
+			m->l4_len)) {
+		struct ipv4_hdr *iph;
+		struct ipv6_hdr *ip6h;
+		struct tcp_hdr *th;
+		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
+		uint32_t tmp;
+
+		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+		th = RTE_PTR_ADD(iph, m->l3_len);
+		if ((iph->version_ihl >> 4) == 4) {
+			iph->hdr_checksum = 0;
+			iph->hdr_checksum = rte_ipv4_cksum(iph);
+			ip_len = iph->total_length;
+			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
+				m->l3_len);
+		} else {
+			ip6h = (struct ipv6_hdr *)iph;
+			ip_paylen = ip6h->payload_len;
+		}
+
+		/* calculate the new phdr checksum not including ip_paylen */
+		prev_cksum = th->cksum;
+		tmp = prev_cksum;
+		tmp += ip_paylen;
+		tmp = (tmp & 0xffff) + (tmp >> 16);
+		new_cksum = tmp;
+
+		/* replace it in the packet */
+		th->cksum = new_cksum;
+	}
+}
+
+static inline int
+tx_offload_enabled(struct virtio_hw *hw)
+{
+	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
+/* avoid write operation when possible, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	if ((var) != (val))			\
+		(var) = (val);			\
+} while (0)
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push)
 {
+	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
 	struct virtqueue *vq = txvq->vq;
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
-	unsigned long offs;
+	struct virtio_net_hdr *hdr;
+	int offload;
 
+	offload = tx_offload_enabled(vq->hw);
 	head_idx = vq->vq_desc_head_idx;
 	idx = head_idx;
 	dxp = &vq->vq_descx[idx];
@@ -228,10 +266,22 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	start_dp = vq->vq_ring.desc;
 
 	if (can_push) {
-		/* put on zero'd transmit header (no offloads) */
-		void *hdr = rte_pktmbuf_prepend(cookie, head_size);
-
-		memset(hdr, 0, head_size);
+		/* prepend cannot fail, checked by caller */
+		hdr = (struct virtio_net_hdr *)
+			rte_pktmbuf_prepend(cookie, head_size);
+		/* rte_pktmbuf_prepend() counts the hdr size in the pkt length,
+		 * which is wrong; the subtraction below restores the correct
+		 * pkt size.
+		 */
+		cookie->pkt_len -= head_size;
+		/* if offload disabled, it is not zeroed below, do it now */
+		if (offload == 0) {
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
 	} else if (use_indirect) {
 		/* setup tx ring slot to point to indirect
 		 * descriptor list stored in reserved region.
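virtio_tso_fix_cksum() folds the IP payload length into the TCP pseudo-header checksum using ones-complement addition, since applications hand the PMD a pseudo-header checksum computed without the length while the host expects it included. A standalone sketch of that fold, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* add a 16-bit value into an existing ones-complement checksum,
 * folding any carry out of bit 15 back into the low word */
static uint16_t
cksum_add(uint16_t cksum, uint16_t add)
{
	uint32_t tmp = cksum;

	tmp += add;
	tmp = (tmp & 0xffff) + (tmp >> 16);
	return (uint16_t)tmp;
}

int
main(void)
{
	/* e.g. pseudo-header checksum 0xfff0, payload length 0x0020 */
	printf("0x%04x\n", cksum_add(0xfff0, 0x0020));	/* prints 0x0011 */
	return 0;
}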
@@ -239,14 +289,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		 * the first slot in indirect ring is already preset
 		 * to point to the header in reserved region
 		 */
-		struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
-
-		offs = idx * sizeof(struct virtio_tx_region)
-			+ offsetof(struct virtio_tx_region, tx_indir);
-
-		start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
 		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
 
 		/* loop below will fill in rest of the indirect elements */
 		start_dp = txr[idx].tx_indir;
@@ -255,15 +302,59 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
 		 */
-		offs = idx * sizeof(struct virtio_tx_region)
-			+ offsetof(struct virtio_tx_region, tx_hdr);
-
-		start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
+		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
 		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
 		start_dp[idx].flags = VRING_DESC_F_NEXT;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
 		idx = start_dp[idx].next;
 	}
 
+	/* Checksum Offload / TSO */
+	if (offload) {
+		if (cookie->ol_flags & PKT_TX_TCP_SEG)
+			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+		case PKT_TX_UDP_CKSUM:
+			hdr->csum_start = cookie->l2_len + cookie->l3_len;
+			hdr->csum_offset = offsetof(struct udp_hdr,
+				dgram_cksum);
+			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+			break;
+
+		case PKT_TX_TCP_CKSUM:
+			hdr->csum_start = cookie->l2_len + cookie->l3_len;
+			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+			break;
+
+		default:
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			break;
+		}
+
+		/* TCP Segmentation Offload */
+		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+			virtio_tso_fix_cksum(cookie);
+			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+				VIRTIO_NET_HDR_GSO_TCPV6 :
+				VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->gso_size = cookie->tso_segsz;
+			hdr->hdr_len =
+				cookie->l2_len +
+				cookie->l3_len +
+				cookie->l4_len;
+		} else {
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
+	}
+
 	do {
 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len   = cookie->data_len;
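The offload block above translates mbuf flags into virtio_net_hdr fields. A hedged sketch (not part of the patch) of how an application might mark an mbuf so the PKT_TX_TCP_SEG branch fills in the header; the fixed header lengths assume a plain Ethernet/IPv4/TCP frame with no options:

#include <rte_mbuf.h>

static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = 14;			/* Ethernet header */
	m->l3_len = 20;			/* IPv4 header, no options */
	m->l4_len = 20;			/* TCP header, no options */
	m->tso_segsz = mss;		/* becomes hdr->gso_size */
	/* PKT_TX_TCP_SEG also makes the PMD request TCP checksum */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_SEG;
}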
@@ -281,228 +372,107 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	vq_update_avail_ring(vq, head_idx);
 }
 
-static void
-virtio_dev_vring_start(struct virtqueue *vq)
-{
-	int size = vq->vq_nentries;
-	struct vring *vr = &vq->vq_ring;
-	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/*
-	 * Reinitialise since virtio port might have been stopped and restarted
-	 */
-	memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
-	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
-	vq->vq_used_cons_idx = 0;
-	vq->vq_desc_head_idx = 0;
-	vq->vq_avail_idx = 0;
-	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
-	vq->vq_free_cnt = vq->vq_nentries;
-	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
-	vring_desc_init(vr->desc, size);
-
-	/*
-	 * Disable device(host) interrupting guest
-	 */
-	virtqueue_disable_intr(vq);
-}
-
 void
 virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 
 	if (hw->cvq && hw->cvq->vq) {
-		virtio_dev_vring_start(hw->cvq->vq);
+		rte_spinlock_init(&hw->cvq->lock);
 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
 	}
 }
 
-void
-virtio_dev_rxtx_start(struct rte_eth_dev *dev)
-{
-	/*
-	 * Start receive and transmit vrings
-	 * - Setup vring structure for all queues
-	 * - Initialize descriptor for the rx vring
-	 * - Allocate blank mbufs for the each rx descriptor
-	 *
-	 */
-	uint16_t i;
-	uint16_t desc_idx;
-	struct virtio_hw *hw = dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Start rx vring. */
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
-		struct virtqueue *vq = rxvq->vq;
-		int error, nbufs;
-		struct rte_mbuf *m;
-
-		virtio_dev_vring_start(vq);
-		if (rxvq->mpool == NULL) {
-			rte_exit(EXIT_FAILURE,
-				"Cannot allocate mbufs for rx virtqueue");
-		}
-
-		/* Allocate blank mbufs for the each rx descriptor */
-		nbufs = 0;
-		error = ENOSPC;
-
-		if (hw->use_simple_rxtx) {
-			for (desc_idx = 0; desc_idx < vq->vq_nentries;
-			     desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-				vq->vq_ring.desc[desc_idx].flags =
-					VRING_DESC_F_WRITE;
-			}
-		}
-
-		memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-		     desc_idx++) {
-			vq->sw_ring[vq->vq_nentries + desc_idx] =
-				&rxvq->fake_mbuf;
-		}
-
-		while (!virtqueue_full(vq)) {
-			m = rte_mbuf_raw_alloc(rxvq->mpool);
-			if (m == NULL)
-				break;
-
-			/******************************************
-			*         Enqueue allocated buffers        *
-			*******************************************/
-			if (hw->use_simple_rxtx)
-				error = virtqueue_enqueue_recv_refill_simple(vq, m);
-			else
-				error = virtqueue_enqueue_recv_refill(vq, m);
-
-			if (error) {
-				rte_pktmbuf_free(m);
-				break;
-			}
-			nbufs++;
-		}
-
-		vq_update_avail_idx(vq);
-
-		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
-
-		VIRTQUEUE_DUMP(vq);
-	}
-
-	/* Start tx vring. */
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct virtnet_tx *txvq = dev->data->tx_queues[i];
-		struct virtqueue *vq = txvq->vq;
-
-		virtio_dev_vring_start(vq);
-		if (hw->use_simple_rxtx) {
-			uint16_t mid_idx = vq->vq_nentries >> 1;
-
-			for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] =
-					desc_idx + mid_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].next =
-					desc_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].addr =
-					txvq->virtio_net_hdr_mem +
-					offsetof(struct virtio_tx_region, tx_hdr);
-				vq->vq_ring.desc[desc_idx + mid_idx].len =
-					vq->hw->vtnet_hdr_size;
-				vq->vq_ring.desc[desc_idx + mid_idx].flags =
-					VRING_DESC_F_NEXT;
-				vq->vq_ring.desc[desc_idx].flags = 0;
-			}
-			for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
-			     desc_idx++)
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-		}
-
-		VIRTQUEUE_DUMP(vq);
-	}
-}
-
 int
 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
-			unsigned int socket_id,
+			unsigned int socket_id __rte_unused,
 			__rte_unused const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
 {
 	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_rx *rxvq;
-	int ret;
 
 	PMD_INIT_FUNC_TRACE();
-	ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
-			nb_desc, socket_id, (void **)&rxvq);
-	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "rvq initialization failed");
-		return ret;
-	}
 
-	/* Create mempool for rx mbuf allocation */
-	rxvq->mpool = mp;
+	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+		nb_desc = vq->vq_nentries;
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
 
+	rxvq = &vq->rxq;
+	rxvq->queue_id = queue_idx;
+	rxvq->mpool = mp;
+	if (rxvq->mpool == NULL) {
+		rte_exit(EXIT_FAILURE,
+			"Cannot allocate mbufs for rx virtqueue");
+	}
 	dev->data->rx_queues[queue_idx] = rxvq;
 
-	virtio_rxq_vec_setup(rxvq);
-
 	return 0;
 }
 
-void
-virtio_dev_rx_queue_release(void *rxq)
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
-	struct virtnet_rx *rxvq = rxq;
-	struct virtqueue *vq;
-	const struct rte_memzone *mz;
+	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	struct virtnet_rx *rxvq = &vq->rxq;
+	struct rte_mbuf *m;
+	uint16_t desc_idx;
+	int error, nbufs;
 
-	if (rxvq == NULL)
-		return;
+	PMD_INIT_FUNC_TRACE();
 
-	/*
-	 * rxvq is freed when vq is freed, and as mz should be freed after the
-	 * del_queue, so we reserve the mz pointer first.
-	 */
-	vq = rxvq->vq;
-	mz = rxvq->mz;
+	/* Allocate blank mbufs for each rx descriptor */
+	nbufs = 0;
 
-	virtio_dev_queue_release(vq);
-	rte_memzone_free(mz);
-}
+	if (hw->use_simple_rx) {
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+		     desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+			vq->vq_ring.desc[desc_idx].flags =
+				VRING_DESC_F_WRITE;
+		}
+	}
 
-static void
-virtio_update_rxtx_handler(struct rte_eth_dev *dev,
-			   const struct rte_eth_txconf *tx_conf)
-{
-	uint8_t use_simple_rxtx = 0;
-	struct virtio_hw *hw = dev->data->dev_private;
+	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+	     desc_idx++) {
+		vq->sw_ring[vq->vq_nentries + desc_idx] =
+			&rxvq->fake_mbuf;
+	}
 
-#if defined RTE_ARCH_X86
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
-		use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
-		use_simple_rxtx = 1;
-#endif
-	/* Use simple rx/tx func if single segment and no offloads */
-	if (use_simple_rxtx &&
-	    (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
-	    !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		PMD_INIT_LOG(INFO, "Using simple rx/tx path");
-		dev->tx_pkt_burst = virtio_xmit_pkts_simple;
-		dev->rx_pkt_burst = virtio_recv_pkts_vec;
-		hw->use_simple_rxtx = use_simple_rxtx;
+	while (!virtqueue_full(vq)) {
+		m = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (m == NULL)
+			break;
+
+		/* Enqueue allocated buffers */
+		if (hw->use_simple_rx)
+			error = virtqueue_enqueue_recv_refill_simple(vq, m);
+		else
+			error = virtqueue_enqueue_recv_refill(vq, m);
+
+		if (error) {
+			rte_pktmbuf_free(m);
+			break;
+		}
+		nbufs++;
 	}
+
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+
+	virtio_rxq_vec_setup(rxvq);
+
+	VIRTQUEUE_DUMP(vq);
+
+	return 0;
 }
 
 /*
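The hunk above folds ring initialization into a per-queue setup_finish stage. For the simple Rx path, the ring is programmed once so that avail slot i always posts descriptor i with a device-writable buffer; refilling then only rewrites buffer addresses. A standalone sketch of that identity mapping with an illustrative ring size:

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_WRITE 2
#define NENTRIES 8			/* illustrative ring size */

int
main(void)
{
	uint16_t avail_ring[NENTRIES];
	uint16_t desc_flags[NENTRIES];
	uint16_t i;

	for (i = 0; i < NENTRIES; i++) {
		avail_ring[i] = i;	/* slot i always posts desc i */
		desc_flags[i] = VRING_DESC_F_WRITE;
	}
	printf("avail slot 5 -> desc %u (flags 0x%x)\n",
	       avail_ring[5], desc_flags[5]);
	return 0;
}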
@@ -516,32 +486,27 @@
 int
 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
 			uint16_t nb_desc,
-			unsigned int socket_id,
+			unsigned int socket_id __rte_unused,
 			const struct rte_eth_txconf *tx_conf)
 {
 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_tx *txvq;
-	struct virtqueue *vq;
 	uint16_t tx_free_thresh;
-	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
-	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
-	    != ETH_TXQ_FLAGS_NOXSUMS) {
-		PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
-		return -EINVAL;
-	}
+	/* cannot use simple rxtx funcs with multisegs or offloads */
+	if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+		hw->use_simple_tx = 0;
 
-	virtio_update_rxtx_handler(dev, tx_conf);
+	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+		nb_desc = vq->vq_nentries;
+	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
 
-	ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
-			nb_desc, socket_id, (void **)&txvq);
-	if (ret < 0) {
-		PMD_INIT_LOG(ERR, "tvq initialization failed");
-		return ret;
-	}
-	vq = txvq->vq;
+	txvq = &vq->txq;
+	txvq->queue_id = queue_idx;
 
 	tx_free_thresh = tx_conf->tx_free_thresh;
 	if (tx_free_thresh == 0)
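The hunk ends inside the unchanged tx_free_thresh fallback, where a zero from the application selects a driver default bounded by the ring size. A hedged standalone sketch of that selection; the default constant here is illustrative, not taken from the patch:

#include <stdint.h>

#define DEFAULT_TX_FREE_THRESH 32	/* illustrative */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint16_t
pick_tx_free_thresh(uint16_t requested, uint16_t vq_nentries)
{
	/* zero means "let the driver choose" */
	if (requested == 0)
		requested = MIN(vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
	return requested;
}

int
main(void)
{
	return pick_tx_free_thresh(0, 256) == 32 ? 0 : 1;
}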
@@ -563,28 +528,42 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	return 0;
 }
 
-void
-virtio_dev_tx_queue_release(void *txq)
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+				 uint16_t queue_idx)
 {
-	struct virtnet_tx *txvq = txq;
-	struct virtqueue *vq;
-	const struct rte_memzone *mz;
-	const struct rte_memzone *hdr_mz;
+	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	uint16_t mid_idx = vq->vq_nentries >> 1;
+	struct virtnet_tx *txvq = &vq->txq;
+	uint16_t desc_idx;
 
-	if (txvq == NULL)
-		return;
+	PMD_INIT_FUNC_TRACE();
 
-	/*
-	 * txvq is freed when vq is freed, and as mz should be freed after the
-	 * del_queue, so we reserve the mz pointer first.
-	 */
-	vq = txvq->vq;
-	mz = txvq->mz;
-	hdr_mz = txvq->virtio_net_hdr_mz;
+	if (hw->use_simple_tx) {
+		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] =
+				desc_idx + mid_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].next =
+				desc_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].addr =
+				txvq->virtio_net_hdr_mem +
+				offsetof(struct virtio_tx_region, tx_hdr);
+			vq->vq_ring.desc[desc_idx + mid_idx].len =
+				vq->hw->vtnet_hdr_size;
+			vq->vq_ring.desc[desc_idx + mid_idx].flags =
+				VRING_DESC_F_NEXT;
+			vq->vq_ring.desc[desc_idx].flags = 0;
+		}
+		for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
+		     desc_idx++)
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+	}
 
-	virtio_dev_queue_release(vq);
-	rte_memzone_free(mz);
-	rte_memzone_free(hdr_mz);
+	VIRTQUEUE_DUMP(vq);
+
+	return 0;
 }
 
 static void
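virtio_dev_tx_queue_setup_finish() above splits the ring in half for the simple Tx path: entry i of the avail ring posts header descriptor i + mid_idx, which points at the shared virtio_net_hdr region and chains to data descriptor i. A standalone sketch of the pairing:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t nentries = 256;	/* illustrative ring size */
	uint16_t mid_idx = nentries >> 1;
	uint16_t i;

	for (i = 0; i < 3; i++)
		printf("avail[%u] -> hdr desc %u -> data desc %u\n",
		       i, i + mid_idx, i);
	return 0;
}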
@@ -666,7 +645,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 		 * In case of SCTP, this will be wrong since it's a CRC
 		 * but there's nothing we can do.
 		 */
-		uint16_t csum, off;
+		uint16_t csum = 0, off;
 
 		rte_raw_cksum_mbuf(m, hdr->csum_start,
			rte_pktmbuf_pkt_len(m) - hdr->csum_start,
@@ -682,13 +661,36 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 			m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 	}
 
+	/* GSO request, save required information in mbuf */
+	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+		/* Check unsupported modes */
+		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
+		    (hdr->gso_size == 0)) {
+			return -EINVAL;
+		}
+
+		/* Update mss lengths in mbuf */
+		m->tso_segsz = hdr->gso_size;
+		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+			case VIRTIO_NET_HDR_GSO_TCPV4:
+			case VIRTIO_NET_HDR_GSO_TCPV6:
+				m->ol_flags |= PKT_RX_LRO | \
+					PKT_RX_L4_CKSUM_NONE;
+				break;
+			default:
+				return -EINVAL;
+		}
+	}
+
 	return 0;
 }
 
 static inline int
 rx_offload_enabled(struct virtio_hw *hw)
 {
-	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM);
+	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
 }
 
 #define VIRTIO_MBUF_BURST_SZ 64
@@ -698,7 +700,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
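The Rx offload changes above surface host-coalesced (GSO) buffers to the application as LRO packets with the segment size preserved. A hedged sketch (not from the patch) of what a consumer of this Rx path might check afterwards:

#include <stdio.h>
#include <rte_mbuf.h>

static void
inspect_lro(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_LRO)
		/* coalesced receive: L4 checksum not verified, MSS kept */
		printf("LRO packet, segment size %u\n",
		       (unsigned int)m->tso_segsz);
}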
@@ -709,20 +711,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	int offload;
 	struct virtio_net_hdr *hdr;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
-	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
-	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+		num = VIRTIO_MBUF_BURST_SZ;
 	if (likely(num > DESC_PER_CACHELINE))
 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
 
 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	nb_enqueued = 0;
 	hdr_size = hw->vtnet_hdr_size;
 	offload = rx_offload_enabled(hw);
@@ -745,8 +750,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
 
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
 
@@ -766,7 +769,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_pkts[nb_rx++] = rxm;
 
-		rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+		rxvq->stats.bytes += rxm->pkt_len;
 		virtio_update_packet_stats(&rxvq->stats, rxm);
 	}
 
@@ -809,7 +812,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -823,14 +826,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	uint32_t hdr_size;
 	int offload;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	i = 0;
 	nb_enqueued = 0;
 	seg_num = 0;
@@ -873,7 +878,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rxm->nb_segs = seg_num;
-		rxm->next = NULL;
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
 		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
@@ -918,7 +922,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 			rxm = rcv_pkts[extra_idx];
 
 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
-			rxm->next = NULL;
 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
 			rxm->data_len = (uint16_t)(len[extra_idx]);
 
@@ -982,9 +985,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_tx;
+	uint16_t nb_used, nb_tx = 0;
 	int error;
 
+	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+		return nb_tx;
+
 	if (unlikely(nb_pkts < 1))
 		return nb_pkts;
 
@@ -1009,7 +1015,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 
 	/* optimize ring usage */
-	if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+	     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
		rte_mbuf_refcnt_read(txm) == 1 &&
		RTE_MBUF_DIRECT(txm) &&
		txm->nb_segs == 1 &&