X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=3805e217a602953f852dec05a900d50b84c37228;hb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;hp=5770fa2957075c66675113fc6e6bf9725aaf8b31;hpb=76d4c652e07d5553eed74da84b4de91e5c6b1870;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 5770fa2957..3805e217a6 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include @@ -39,23 +10,28 @@ #include #include -#include #include #include #include #include #include -#include +#include #include #include #include #include +#include +#include +#include +#include #include "virtio_logs.h" #include "virtio_ethdev.h" #include "virtio_pci.h" #include "virtqueue.h" #include "virtio_rxtx.h" +#include "virtio_rxtx_simple.h" +#include "virtio_ring.h" #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len) @@ -63,26 +39,36 @@ #define VIRTIO_DUMP_PACKET(m, len) do { } while (0) #endif +int +virtio_dev_rx_queue_done(void *rxq, uint16_t offset) +{ + struct virtnet_rx *rxvq = rxq; + struct virtqueue *vq = rxvq->vq; -#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \ - ETH_TXQ_FLAGS_NOOFFLOADS) + return VIRTQUEUE_NUSED(vq) >= offset; +} -static int use_simple_rxtx; +void +vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num) +{ + vq->vq_free_cnt += num; + vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1); +} -static void +void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) { struct vring_desc *dp, *dp_tail; struct vq_desc_extra *dxp; uint16_t desc_idx_last = desc_idx; - dp = &vq->vq_ring.desc[desc_idx]; + dp = &vq->vq_split.ring.desc[desc_idx]; dxp = &vq->vq_descx[desc_idx]; vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) { while (dp->flags & VRING_DESC_F_NEXT) { desc_idx_last = dp->next; - dp = &vq->vq_ring.desc[dp->next]; + dp = &vq->vq_split.ring.desc[dp->next]; } } dxp->ndescs = 0; @@ -95,7 +81,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) { vq->vq_desc_head_idx = desc_idx; } else { - dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx]; + dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx]; dp_tail->next = desc_idx; } @@ -103,6 +89,65 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) dp->next = VQ_RING_DESC_CHAIN_END; } +static void +vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) +{ + struct vq_desc_extra *dxp; + + dxp = &vq->vq_descx[id]; + vq->vq_free_cnt += dxp->ndescs; + + if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_head_idx = id; + else + vq->vq_descx[vq->vq_desc_tail_idx].next = id; + + vq->vq_desc_tail_idx = id; + dxp->next = VQ_RING_DESC_CHAIN_END; +} + +static uint16_t +virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq, + struct rte_mbuf **rx_pkts, + uint32_t *len, + uint16_t num) +{ + struct rte_mbuf *cookie; + uint16_t used_idx; + uint16_t id; + struct vring_packed_desc *desc; + uint16_t i; + + desc = vq->vq_packed.ring.desc; + + for (i = 0; i < num; i++) { + used_idx = vq->vq_used_cons_idx; + if (!desc_is_used(&desc[used_idx], vq)) + return i; + virtio_rmb(vq->hw->weak_barriers); + len[i] = desc[used_idx].len; + id = desc[used_idx].id; + cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie; + if (unlikely(cookie == NULL)) { + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", + vq->vq_used_cons_idx); + break; + } + rte_prefetch0(cookie); + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); + rx_pkts[i] = cookie; + + vq->vq_free_cnt++; + vq->vq_used_cons_idx++; + if (vq->vq_used_cons_idx >= vq->vq_nentries) { + vq->vq_used_cons_idx -= vq->vq_nentries; + vq->vq_packed.used_wrap_counter ^= 1; + } + } + + return i; +} + static uint16_t virtqueue_dequeue_burst_rx(struct 
virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num) @@ -115,13 +160,13 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, /* Caller does the check */ for (i = 0; i < num ; i++) { used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); - uep = &vq->vq_ring.used->ring[used_idx]; + uep = &vq->vq_split.ring.used->ring[used_idx]; desc_idx = (uint16_t) uep->id; len[i] = uep->len; cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie; if (unlikely(cookie == NULL)) { - PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n", + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", vq->vq_used_cons_idx); break; } @@ -137,11 +182,117 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, return i; } +static uint16_t +virtqueue_dequeue_rx_inorder(struct virtqueue *vq, + struct rte_mbuf **rx_pkts, + uint32_t *len, + uint16_t num) +{ + struct vring_used_elem *uep; + struct rte_mbuf *cookie; + uint16_t used_idx = 0; + uint16_t i; + + if (unlikely(num == 0)) + return 0; + + for (i = 0; i < num; i++) { + used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1); + /* Desc idx same as used idx */ + uep = &vq->vq_split.ring.used->ring[used_idx]; + len[i] = uep->len; + cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie; + + if (unlikely(cookie == NULL)) { + PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", + vq->vq_used_cons_idx); + break; + } + + rte_prefetch0(cookie); + rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); + rx_pkts[i] = cookie; + vq->vq_used_cons_idx++; + vq->vq_descx[used_idx].cookie = NULL; + } + + vq_ring_free_inorder(vq, used_idx, i); + return i; +} + #ifndef DEFAULT_TX_FREE_THRESH #define DEFAULT_TX_FREE_THRESH 32 #endif +static void +virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num) +{ + uint16_t used_idx, id, curr_id, free_cnt = 0; + uint16_t size = vq->vq_nentries; + struct vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct vq_desc_extra *dxp; + + used_idx = vq->vq_used_cons_idx; + while (num > 0 && desc_is_used(&desc[used_idx], vq)) { + virtio_rmb(vq->hw->weak_barriers); + id = desc[used_idx].id; + do { + curr_id = used_idx; + dxp = &vq->vq_descx[used_idx]; + used_idx += dxp->ndescs; + free_cnt += dxp->ndescs; + num -= dxp->ndescs; + if (used_idx >= size) { + used_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } while (curr_id != id); + } + vq->vq_used_cons_idx = used_idx; + vq->vq_free_cnt += free_cnt; +} + +static void +virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num) +{ + uint16_t used_idx, id; + uint16_t size = vq->vq_nentries; + struct vring_packed_desc *desc = vq->vq_packed.ring.desc; + struct vq_desc_extra *dxp; + + used_idx = vq->vq_used_cons_idx; + while (num-- && desc_is_used(&desc[used_idx], vq)) { + virtio_rmb(vq->hw->weak_barriers); + id = desc[used_idx].id; + dxp = &vq->vq_descx[id]; + vq->vq_used_cons_idx += dxp->ndescs; + if (vq->vq_used_cons_idx >= size) { + vq->vq_used_cons_idx -= size; + vq->vq_packed.used_wrap_counter ^= 1; + } + vq_ring_free_id_packed(vq, id); + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + used_idx = vq->vq_used_cons_idx; + } +} + /* Cleanup from completed transmits. 
*/ +static inline void +virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order) +{ + if (in_order) + virtio_xmit_cleanup_inorder_packed(vq, num); + else + virtio_xmit_cleanup_normal_packed(vq, num); +} + static void virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) { @@ -151,7 +302,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) struct vq_desc_extra *dxp; used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); - uep = &vq->vq_ring.used->ring[used_idx]; + uep = &vq->vq_split.ring.used->ring[used_idx]; desc_idx = (uint16_t) uep->id; dxp = &vq->vq_descx[desc_idx]; @@ -165,361 +316,748 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) } } +/* Cleanup from completed inorder transmits. */ +static void +virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num) +{ + uint16_t i, idx = vq->vq_used_cons_idx; + int16_t free_cnt = 0; + struct vq_desc_extra *dxp = NULL; + + if (unlikely(num == 0)) + return; + + for (i = 0; i < num; i++) { + dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)]; + free_cnt += dxp->ndescs; + if (dxp->cookie != NULL) { + rte_pktmbuf_free(dxp->cookie); + dxp->cookie = NULL; + } + } + + vq->vq_free_cnt += free_cnt; + vq->vq_used_cons_idx = idx; +} static inline int -virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) +virtqueue_enqueue_refill_inorder(struct virtqueue *vq, + struct rte_mbuf **cookies, + uint16_t num) { struct vq_desc_extra *dxp; struct virtio_hw *hw = vq->hw; struct vring_desc *start_dp; - uint16_t needed = 1; - uint16_t head_idx, idx; + uint16_t head_idx, idx, i = 0; if (unlikely(vq->vq_free_cnt == 0)) return -ENOSPC; - if (unlikely(vq->vq_free_cnt < needed)) + if (unlikely(vq->vq_free_cnt < num)) return -EMSGSIZE; - head_idx = vq->vq_desc_head_idx; - if (unlikely(head_idx >= vq->vq_nentries)) - return -EFAULT; - - idx = head_idx; - dxp = &vq->vq_descx[idx]; - dxp->cookie = (void *)cookie; - dxp->ndescs = needed; - - start_dp = vq->vq_ring.desc; - start_dp[idx].addr = - (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - - hw->vtnet_hdr_size); - start_dp[idx].len = - cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; - start_dp[idx].flags = VRING_DESC_F_WRITE; - idx = start_dp[idx].next; - vq->vq_desc_head_idx = idx; - if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) - vq->vq_desc_tail_idx = idx; - vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); - vq_update_avail_ring(vq, head_idx); + head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1); + start_dp = vq->vq_split.ring.desc; + + while (i < num) { + idx = head_idx & (vq->vq_nentries - 1); + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookies[i]; + dxp->ndescs = 1; + + start_dp[idx].addr = + VIRTIO_MBUF_ADDR(cookies[i], vq) + + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; + start_dp[idx].len = + cookies[i]->buf_len - + RTE_PKTMBUF_HEADROOM + + hw->vtnet_hdr_size; + start_dp[idx].flags = VRING_DESC_F_WRITE; + + vq_update_avail_ring(vq, idx); + head_idx++; + i++; + } + vq->vq_desc_head_idx += num; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); return 0; } -static int -virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie) +static inline int +virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie, + uint16_t num) { struct vq_desc_extra *dxp; - struct vring_desc *start_dp; - uint16_t seg_num = cookie->nb_segs; - uint16_t needed = 1 + seg_num; - uint16_t head_idx, idx; - size_t head_size = txvq->hw->vtnet_hdr_size; + struct virtio_hw *hw = vq->hw; + 
struct vring_desc *start_dp = vq->vq_split.ring.desc; + uint16_t idx, i; - if (unlikely(txvq->vq_free_cnt == 0)) + if (unlikely(vq->vq_free_cnt == 0)) return -ENOSPC; - if (unlikely(txvq->vq_free_cnt < needed)) + if (unlikely(vq->vq_free_cnt < num)) return -EMSGSIZE; - head_idx = txvq->vq_desc_head_idx; - if (unlikely(head_idx >= txvq->vq_nentries)) - return -EFAULT; - - idx = head_idx; - dxp = &txvq->vq_descx[idx]; - dxp->cookie = (void *)cookie; - dxp->ndescs = needed; - start_dp = txvq->vq_ring.desc; - start_dp[idx].addr = - txvq->virtio_net_hdr_mem + idx * head_size; - start_dp[idx].len = head_size; - start_dp[idx].flags = VRING_DESC_F_NEXT; + if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries)) + return -EFAULT; - for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) { - idx = start_dp[idx].next; - start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie); - start_dp[idx].len = cookie->data_len; - start_dp[idx].flags = VRING_DESC_F_NEXT; - cookie = cookie->next; + for (i = 0; i < num; i++) { + idx = vq->vq_desc_head_idx; + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookie[i]; + dxp->ndescs = 1; + + start_dp[idx].addr = + VIRTIO_MBUF_ADDR(cookie[i], vq) + + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; + start_dp[idx].len = + cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM + + hw->vtnet_hdr_size; + start_dp[idx].flags = VRING_DESC_F_WRITE; + vq->vq_desc_head_idx = start_dp[idx].next; + vq_update_avail_ring(vq, idx); + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) { + vq->vq_desc_tail_idx = vq->vq_desc_head_idx; + break; + } } - start_dp[idx].flags &= ~VRING_DESC_F_NEXT; - idx = start_dp[idx].next; - txvq->vq_desc_head_idx = idx; - if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) - txvq->vq_desc_tail_idx = idx; - txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); - vq_update_avail_ring(txvq, head_idx); + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); return 0; } -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) +static inline int +virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, + struct rte_mbuf **cookie, uint16_t num) { - struct rte_mbuf *m; + struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc; + uint16_t flags = vq->vq_packed.cached_flags; + struct virtio_hw *hw = vq->hw; + struct vq_desc_extra *dxp; + uint16_t idx; + int i; - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); + if (unlikely(vq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(vq->vq_free_cnt < num)) + return -EMSGSIZE; - return m; + for (i = 0; i < num; i++) { + idx = vq->vq_avail_idx; + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookie[i]; + dxp->ndescs = 1; + + start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) + + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; + start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM + + hw->vtnet_hdr_size; + + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = vq->vq_desc_head_idx; + virtio_wmb(hw->weak_barriers); + start_dp[idx].flags = flags; + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + flags = vq->vq_packed.cached_flags; + } + } + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); + return 0; } +/* When doing TSO, the IP length is not included in the pseudo header + * checksum of the packet given to the PMD, but for virtio it is + * expected. 
+ */ static void -virtio_dev_vring_start(struct virtqueue *vq, int queue_type) +virtio_tso_fix_cksum(struct rte_mbuf *m) { - struct rte_mbuf *m; - int i, nbufs, error, size = vq->vq_nentries; - struct vring *vr = &vq->vq_ring; - uint8_t *ring_mem = vq->vq_ring_virt_mem; + /* common case: header is not fragmented */ + if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len + + m->l4_len)) { + struct ipv4_hdr *iph; + struct ipv6_hdr *ip6h; + struct tcp_hdr *th; + uint16_t prev_cksum, new_cksum, ip_len, ip_paylen; + uint32_t tmp; + + iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len); + th = RTE_PTR_ADD(iph, m->l3_len); + if ((iph->version_ihl >> 4) == 4) { + iph->hdr_checksum = 0; + iph->hdr_checksum = rte_ipv4_cksum(iph); + ip_len = iph->total_length; + ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) - + m->l3_len); + } else { + ip6h = (struct ipv6_hdr *)iph; + ip_paylen = ip6h->payload_len; + } - PMD_INIT_FUNC_TRACE(); + /* calculate the new phdr checksum not including ip_paylen */ + prev_cksum = th->cksum; + tmp = prev_cksum; + tmp += ip_paylen; + tmp = (tmp & 0xffff) + (tmp >> 16); + new_cksum = tmp; - /* - * Reinitialise since virtio port might have been stopped and restarted - */ - memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size); - vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN); - vq->vq_used_cons_idx = 0; - vq->vq_desc_head_idx = 0; - vq->vq_avail_idx = 0; - vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); - vq->vq_free_cnt = vq->vq_nentries; - memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries); - - /* Chain all the descriptors in the ring with an END */ - for (i = 0; i < size - 1; i++) - vr->desc[i].next = (uint16_t)(i + 1); - vr->desc[i].next = VQ_RING_DESC_CHAIN_END; + /* replace it in the packet */ + th->cksum = new_cksum; + } +} - /* - * Disable device(host) interrupting guest - */ - virtqueue_disable_intr(vq); - - /* Only rx virtqueue needs mbufs to be allocated at initialization */ - if (queue_type == VTNET_RQ) { - if (vq->mpool == NULL) - rte_exit(EXIT_FAILURE, - "Cannot allocate initial mbufs for rx virtqueue"); - - /* Allocate blank mbufs for the each rx descriptor */ - nbufs = 0; - error = ENOSPC; - - if (use_simple_rxtx) - for (i = 0; i < vq->vq_nentries; i++) { - vq->vq_ring.avail->ring[i] = i; - vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; - } - memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf)); - for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++) - vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf; +/* avoid write operation when necessary, to lessen cache issues */ +#define ASSIGN_UNLESS_EQUAL(var, val) do { \ + if ((var) != (val)) \ + (var) = (val); \ +} while (0) + +#define virtqueue_clear_net_hdr(_hdr) do { \ + ASSIGN_UNLESS_EQUAL((_hdr)->csum_start, 0); \ + ASSIGN_UNLESS_EQUAL((_hdr)->csum_offset, 0); \ + ASSIGN_UNLESS_EQUAL((_hdr)->flags, 0); \ + ASSIGN_UNLESS_EQUAL((_hdr)->gso_type, 0); \ + ASSIGN_UNLESS_EQUAL((_hdr)->gso_size, 0); \ + ASSIGN_UNLESS_EQUAL((_hdr)->hdr_len, 0); \ +} while (0) + +static inline void +virtqueue_xmit_offload(struct virtio_net_hdr *hdr, + struct rte_mbuf *cookie, + bool offload) +{ + if (offload) { + if (cookie->ol_flags & PKT_TX_TCP_SEG) + cookie->ol_flags |= PKT_TX_TCP_CKSUM; + + switch (cookie->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + hdr->csum_start = cookie->l2_len + cookie->l3_len; + hdr->csum_offset = offsetof(struct udp_hdr, + dgram_cksum); + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + break; - while (!virtqueue_full(vq)) { - m = 
rte_rxmbuf_alloc(vq->mpool); - if (m == NULL) - break; + case PKT_TX_TCP_CKSUM: + hdr->csum_start = cookie->l2_len + cookie->l3_len; + hdr->csum_offset = offsetof(struct tcp_hdr, cksum); + hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + break; - /****************************************** - * Enqueue allocated buffers * - *******************************************/ - if (use_simple_rxtx) - error = virtqueue_enqueue_recv_refill_simple(vq, m); - else - error = virtqueue_enqueue_recv_refill(vq, m); - if (error) { - rte_pktmbuf_free(m); - break; - } - nbufs++; + default: + ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0); + ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0); + ASSIGN_UNLESS_EQUAL(hdr->flags, 0); + break; } - vq_update_avail_idx(vq); - - PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); - - VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, - vq->vq_queue_index); - VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN, - vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); - } else if (queue_type == VTNET_TQ) { - if (use_simple_rxtx) { - int mid_idx = vq->vq_nentries >> 1; - for (i = 0; i < mid_idx; i++) { - vq->vq_ring.avail->ring[i] = i + mid_idx; - vq->vq_ring.desc[i + mid_idx].next = i; - vq->vq_ring.desc[i + mid_idx].addr = - vq->virtio_net_hdr_mem + - mid_idx * vq->hw->vtnet_hdr_size; - vq->vq_ring.desc[i + mid_idx].len = - vq->hw->vtnet_hdr_size; - vq->vq_ring.desc[i + mid_idx].flags = - VRING_DESC_F_NEXT; - vq->vq_ring.desc[i].flags = 0; - } - for (i = mid_idx; i < vq->vq_nentries; i++) - vq->vq_ring.avail->ring[i] = i; + /* TCP Segmentation Offload */ + if (cookie->ol_flags & PKT_TX_TCP_SEG) { + virtio_tso_fix_cksum(cookie); + hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ? + VIRTIO_NET_HDR_GSO_TCPV6 : + VIRTIO_NET_HDR_GSO_TCPV4; + hdr->gso_size = cookie->tso_segsz; + hdr->hdr_len = + cookie->l2_len + + cookie->l3_len + + cookie->l4_len; + } else { + ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0); + ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0); + ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0); } - - VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, - vq->vq_queue_index); - VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN, - vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); - } else { - VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_SEL, - vq->vq_queue_index); - VIRTIO_WRITE_REG_4(vq->hw, VIRTIO_PCI_QUEUE_PFN, - vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); } } -void -virtio_dev_cq_start(struct rte_eth_dev *dev) +static inline void +virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, + struct rte_mbuf **cookies, + uint16_t num) { - struct virtio_hw *hw = dev->data->dev_private; + struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; + struct vring_desc *start_dp; + struct virtio_net_hdr *hdr; + uint16_t idx; + uint16_t head_size = vq->hw->vtnet_hdr_size; + uint16_t i = 0; - if (hw->cvq) { - virtio_dev_vring_start(hw->cvq, VTNET_CQ); - VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); - } -} + idx = vq->vq_desc_head_idx; + start_dp = vq->vq_split.ring.desc; -void -virtio_dev_rxtx_start(struct rte_eth_dev *dev) -{ - /* - * Start receive and transmit vrings - * - Setup vring structure for all queues - * - Initialize descriptor for the rx vring - * - Allocate blank mbufs for the each rx descriptor - * - */ - int i; + while (i < num) { + idx = idx & (vq->vq_nentries - 1); + dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)]; + dxp->cookie = (void *)cookies[i]; + dxp->ndescs = 1; - PMD_INIT_FUNC_TRACE(); + hdr = (struct virtio_net_hdr *) + rte_pktmbuf_prepend(cookies[i], head_size); + cookies[i]->pkt_len -= 
head_size; - /* Start rx vring. */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); - } + /* if offload disabled, hdr is not zeroed yet, do it now */ + if (!vq->hw->has_tx_offload) + virtqueue_clear_net_hdr(hdr); + else + virtqueue_xmit_offload(hdr, cookies[i], true); - /* Start tx vring. */ - for (i = 0; i < dev->data->nb_tx_queues; i++) { - virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); - } + start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq); + start_dp[idx].len = cookies[i]->data_len; + start_dp[idx].flags = 0; + + vq_update_avail_ring(vq, idx); + + idx++; + i++; + }; + + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); + vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1); } -int -virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, - uint16_t queue_idx, - uint16_t nb_desc, - unsigned int socket_id, - __rte_unused const struct rte_eth_rxconf *rx_conf, - struct rte_mempool *mp) +static inline void +virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq, + struct rte_mbuf *cookie, + int in_order) { - uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; - struct virtqueue *vq; - int ret; - - PMD_INIT_FUNC_TRACE(); - ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); - if (ret < 0) { - PMD_INIT_LOG(ERR, "rvq initialization failed"); - return ret; + struct virtqueue *vq = txvq->vq; + struct vring_packed_desc *dp; + struct vq_desc_extra *dxp; + uint16_t idx, id, flags; + uint16_t head_size = vq->hw->vtnet_hdr_size; + struct virtio_net_hdr *hdr; + + id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; + idx = vq->vq_avail_idx; + dp = &vq->vq_packed.ring.desc[idx]; + + dxp = &vq->vq_descx[id]; + dxp->ndescs = 1; + dxp->cookie = cookie; + + flags = vq->vq_packed.cached_flags; + + /* prepend cannot fail, checked by caller */ + hdr = (struct virtio_net_hdr *) + rte_pktmbuf_prepend(cookie, head_size); + cookie->pkt_len -= head_size; + + /* if offload disabled, hdr is not zeroed yet, do it now */ + if (!vq->hw->has_tx_offload) + virtqueue_clear_net_hdr(hdr); + else + virtqueue_xmit_offload(hdr, cookie, true); + + dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); + dp->len = cookie->data_len; + dp->id = id; + + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED; } - /* Create mempool for rx mbuf allocation */ - vq->mpool = mp; - - dev->data->rx_queues[queue_idx] = vq; + vq->vq_free_cnt--; - virtio_rxq_vec_setup(vq); + if (!in_order) { + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; + } - return 0; + virtio_wmb(vq->hw->weak_barriers); + dp->flags = flags; } -void -virtio_dev_rx_queue_release(void *rxq) +static inline void +virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + uint16_t needed, int can_push, int in_order) { - virtio_dev_queue_release(rxq); -} + struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; + struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; + struct vring_packed_desc *start_dp, *head_dp; + uint16_t idx, id, head_idx, head_flags; + uint16_t head_size = vq->hw->vtnet_hdr_size; + struct virtio_net_hdr *hdr; + uint16_t prev; -/* - * struct rte_eth_dev *dev: Used to update 
dev - * uint16_t nb_desc: Defaults to values read from config space - * unsigned int socket_id: Used to allocate memzone - * const struct rte_eth_txconf *tx_conf: Used to setup tx engine - * uint16_t queue_idx: Just used as an index in dev txq list - */ -int -virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, - uint16_t queue_idx, - uint16_t nb_desc, - unsigned int socket_id, - const struct rte_eth_txconf *tx_conf) -{ - uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; - struct virtio_hw *hw = dev->data->dev_private; - struct virtqueue *vq; - uint16_t tx_free_thresh; - int ret; + id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; - PMD_INIT_FUNC_TRACE(); + dxp = &vq->vq_descx[id]; + dxp->ndescs = needed; + dxp->cookie = cookie; - if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS) - != ETH_TXQ_FLAGS_NOXSUMS) { - PMD_INIT_LOG(ERR, "TX checksum offload not supported\n"); - return -EINVAL; - } + head_idx = vq->vq_avail_idx; + idx = head_idx; + prev = head_idx; + start_dp = vq->vq_packed.ring.desc; + + head_dp = &vq->vq_packed.ring.desc[idx]; + head_flags = cookie->next ? VRING_DESC_F_NEXT : 0; + head_flags |= vq->vq_packed.cached_flags; + + if (can_push) { + /* prepend cannot fail, checked by caller */ + hdr = (struct virtio_net_hdr *) + rte_pktmbuf_prepend(cookie, head_size); + /* rte_pktmbuf_prepend() counts the hdr size to the pkt length, + * which is wrong. Below subtract restores correct pkt size. + */ + cookie->pkt_len -= head_size; - /* Use simple rx/tx func if single segment and no offloads */ - if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS && - !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { - PMD_INIT_LOG(INFO, "Using simple rx/tx path"); - dev->tx_pkt_burst = virtio_xmit_pkts_simple; - dev->rx_pkt_burst = virtio_recv_pkts_vec; - use_simple_rxtx = 1; + /* if offload disabled, it is not zeroed below, do it now */ + if (!vq->hw->has_tx_offload) + virtqueue_clear_net_hdr(hdr); + } else { + /* setup first tx ring slot to point to header + * stored in reserved region. + */ + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = vq->hw->vtnet_hdr_size; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + } } - ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); - if (ret < 0) { - PMD_INIT_LOG(ERR, "rvq initialization failed"); - return ret; - } + virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); - tx_free_thresh = tx_conf->tx_free_thresh; - if (tx_free_thresh == 0) - tx_free_thresh = - RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH); + do { + uint16_t flags; - if (tx_free_thresh >= (vq->vq_nentries - 3)) { - RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the " - "number of TX entries minus 3 (%u)." - " (tx_free_thresh=%u port=%u queue=%u)\n", - vq->vq_nentries - 3, - tx_free_thresh, dev->data->port_id, queue_idx); - return -EINVAL; - } + start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); + start_dp[idx].len = cookie->data_len; + if (likely(idx != head_idx)) { + flags = cookie->next ? 
VRING_DESC_F_NEXT : 0; + flags |= vq->vq_packed.cached_flags; + start_dp[idx].flags = flags; + } + prev = idx; + idx++; + if (idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + } + } while ((cookie = cookie->next) != NULL); - vq->vq_free_thresh = tx_free_thresh; + start_dp[prev].id = id; - dev->data->tx_queues[queue_idx] = vq; - return 0; -} + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq->vq_avail_idx = idx; -void -virtio_dev_tx_queue_release(void *txq) -{ - virtio_dev_queue_release(txq); + if (!in_order) { + vq->vq_desc_head_idx = dxp->next; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; + } + + virtio_wmb(vq->hw->weak_barriers); + head_dp->flags = head_flags; } -static void +static inline void +virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + uint16_t needed, int use_indirect, int can_push, + int in_order) +{ + struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; + struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; + struct vring_desc *start_dp; + uint16_t seg_num = cookie->nb_segs; + uint16_t head_idx, idx; + uint16_t head_size = vq->hw->vtnet_hdr_size; + struct virtio_net_hdr *hdr; + + head_idx = vq->vq_desc_head_idx; + idx = head_idx; + if (in_order) + dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)]; + else + dxp = &vq->vq_descx[idx]; + dxp->cookie = (void *)cookie; + dxp->ndescs = needed; + + start_dp = vq->vq_split.ring.desc; + + if (can_push) { + /* prepend cannot fail, checked by caller */ + hdr = (struct virtio_net_hdr *) + rte_pktmbuf_prepend(cookie, head_size); + /* rte_pktmbuf_prepend() counts the hdr size to the pkt length, + * which is wrong. Below subtract restores correct pkt size. + */ + cookie->pkt_len -= head_size; + + /* if offload disabled, it is not zeroed below, do it now */ + if (!vq->hw->has_tx_offload) + virtqueue_clear_net_hdr(hdr); + } else if (use_indirect) { + /* setup tx ring slot to point to indirect + * descriptor list stored in reserved region. + * + * the first slot in indirect ring is already preset + * to point to the header in reserved region + */ + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_indir, txr); + start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc); + start_dp[idx].flags = VRING_DESC_F_INDIRECT; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + + /* loop below will fill in rest of the indirect elements */ + start_dp = txr[idx].tx_indir; + idx = 1; + } else { + /* setup first tx ring slot to point to header + * stored in reserved region. + */ + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); + start_dp[idx].len = vq->hw->vtnet_hdr_size; + start_dp[idx].flags = VRING_DESC_F_NEXT; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + + idx = start_dp[idx].next; + } + + virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); + + do { + start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); + start_dp[idx].len = cookie->data_len; + start_dp[idx].flags = cookie->next ? 
VRING_DESC_F_NEXT : 0; + idx = start_dp[idx].next; + } while ((cookie = cookie->next) != NULL); + + if (use_indirect) + idx = vq->vq_split.ring.desc[head_idx].next; + + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + + vq->vq_desc_head_idx = idx; + vq_update_avail_ring(vq, head_idx); + + if (!in_order) { + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = idx; + } +} + +void +virtio_dev_cq_start(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + + if (hw->cvq && hw->cvq->vq) { + rte_spinlock_init(&hw->cvq->lock); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq); + } +} + +int +virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mp) +{ + uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_rx *rxvq; + + PMD_INIT_FUNC_TRACE(); + + if (nb_desc == 0 || nb_desc > vq->vq_nentries) + nb_desc = vq->vq_nentries; + vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); + + rxvq = &vq->rxq; + rxvq->queue_id = queue_idx; + rxvq->mpool = mp; + if (rxvq->mpool == NULL) { + rte_exit(EXIT_FAILURE, + "Cannot allocate mbufs for rx virtqueue"); + } + + dev->data->rx_queues[queue_idx] = rxvq; + + return 0; +} + +int +virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_rx *rxvq = &vq->rxq; + struct rte_mbuf *m; + uint16_t desc_idx; + int error, nbufs, i; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate blank mbufs for the each rx descriptor */ + nbufs = 0; + + if (hw->use_simple_rx) { + for (desc_idx = 0; desc_idx < vq->vq_nentries; + desc_idx++) { + vq->vq_split.ring.avail->ring[desc_idx] = desc_idx; + vq->vq_split.ring.desc[desc_idx].flags = + VRING_DESC_F_WRITE; + } + + virtio_rxq_vec_setup(rxvq); + } + + memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf)); + for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; + desc_idx++) { + vq->sw_ring[vq->vq_nentries + desc_idx] = + &rxvq->fake_mbuf; + } + + if (hw->use_simple_rx) { + while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { + virtio_rxq_rearm_vec(rxvq); + nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH; + } + } else if (hw->use_inorder_rx) { + if ((!virtqueue_full(vq))) { + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *pkts[free_cnt]; + + if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts, + free_cnt)) { + error = virtqueue_enqueue_refill_inorder(vq, + pkts, + free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(pkts[i]); + } + } + + nbufs += free_cnt; + vq_update_avail_idx(vq); + } + } else { + while (!virtqueue_full(vq)) { + m = rte_mbuf_raw_alloc(rxvq->mpool); + if (m == NULL) + break; + + /* Enqueue allocated buffers */ + if (vtpci_packed_queue(vq->hw)) + error = virtqueue_enqueue_recv_refill_packed(vq, + &m, 1); + else + error = virtqueue_enqueue_recv_refill(vq, + &m, 1); + if (error) { + rte_pktmbuf_free(m); + break; + } + nbufs++; + } + + if (!vtpci_packed_queue(vq->hw)) + vq_update_avail_idx(vq); + } + + PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); + + VIRTQUEUE_DUMP(vq); + + return 0; +} + +/* + * struct rte_eth_dev *dev: Used to update dev + * 
uint16_t nb_desc: Defaults to values read from config space + * unsigned int socket_id: Used to allocate memzone + * const struct rte_eth_txconf *tx_conf: Used to setup tx engine + * uint16_t queue_idx: Just used as an index in dev txq list + */ +int +virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf) +{ + uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + struct virtnet_tx *txvq; + uint16_t tx_free_thresh; + + PMD_INIT_FUNC_TRACE(); + + if (nb_desc == 0 || nb_desc > vq->vq_nentries) + nb_desc = vq->vq_nentries; + vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc); + + txvq = &vq->txq; + txvq->queue_id = queue_idx; + + tx_free_thresh = tx_conf->tx_free_thresh; + if (tx_free_thresh == 0) + tx_free_thresh = + RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh >= (vq->vq_nentries - 3)) { + RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the " + "number of TX entries minus 3 (%u)." + " (tx_free_thresh=%u port=%u queue=%u)\n", + vq->vq_nentries - 3, + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + vq->vq_free_thresh = tx_free_thresh; + + dev->data->tx_queues[queue_idx] = txvq; + return 0; +} + +int +virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, + uint16_t queue_idx) +{ + uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX; + struct virtio_hw *hw = dev->data->dev_private; + struct virtqueue *vq = hw->vqs[vtpci_queue_idx]; + + PMD_INIT_FUNC_TRACE(); + + if (!vtpci_packed_queue(hw)) { + if (hw->use_inorder_tx) + vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0; + } + + VIRTQUEUE_DUMP(vq); + + return 0; +} + +static inline void virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) { int error; @@ -527,133 +1065,727 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) * Requeue the discarded mbuf. This should always be * successful since it was just dequeued. 
*/ - error = virtqueue_enqueue_recv_refill(vq, m); + if (vtpci_packed_queue(vq->hw)) + error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1); + else + error = virtqueue_enqueue_recv_refill(vq, &m, 1); + + if (unlikely(error)) { + RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); + rte_pktmbuf_free(m); + } +} + +static inline void +virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m) +{ + int error; + + error = virtqueue_enqueue_refill_inorder(vq, &m, 1); if (unlikely(error)) { RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); rte_pktmbuf_free(m); } -} +} + +static inline void +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) +{ + uint32_t s = mbuf->pkt_len; + struct rte_ether_addr *ea; + + stats->bytes += s; + + if (s == 64) { + stats->size_bins[1]++; + } else if (s > 64 && s < 1024) { + uint32_t bin; + + /* count zeros, and offset into correct bin */ + bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; + stats->size_bins[bin]++; + } else { + if (s < 64) + stats->size_bins[0]++; + else if (s < 1519) + stats->size_bins[6]++; + else + stats->size_bins[7]++; + } + + ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *); + if (rte_is_multicast_ether_addr(ea)) { + if (rte_is_broadcast_ether_addr(ea)) + stats->broadcast++; + else + stats->multicast++; + } +} + +static inline void +virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m) +{ + VIRTIO_DUMP_PACKET(m, m->data_len); + + virtio_update_packet_stats(&rxvq->stats, m); +} + +/* Optionally fill offload information in structure */ +static inline int +virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr) +{ + struct rte_net_hdr_lens hdr_lens; + uint32_t hdrlen, ptype; + int l4_supported = 0; + + /* nothing to do */ + if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE) + return 0; + + m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; + + ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK); + m->packet_type = ptype; + if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP || + (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP || + (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP) + l4_supported = 1; + + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len; + if (hdr->csum_start <= hdrlen && l4_supported) { + m->ol_flags |= PKT_RX_L4_CKSUM_NONE; + } else { + /* Unknown proto or tunnel, do sw cksum. We can assume + * the cksum field is in the first segment since the + * buffers we provided to the host are large enough. + * In case of SCTP, this will be wrong since it's a CRC + * but there's nothing we can do. 
+ */ + uint16_t csum = 0, off; + + rte_raw_cksum_mbuf(m, hdr->csum_start, + rte_pktmbuf_pkt_len(m) - hdr->csum_start, + &csum); + if (likely(csum != 0xffff)) + csum = ~csum; + off = hdr->csum_offset + hdr->csum_start; + if (rte_pktmbuf_data_len(m) >= off + 1) + *rte_pktmbuf_mtod_offset(m, uint16_t *, + off) = csum; + } + } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) { + m->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + } + + /* GSO request, save required information in mbuf */ + if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { + /* Check unsupported modes */ + if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) || + (hdr->gso_size == 0)) { + return -EINVAL; + } + + /* Update mss lengthes in mbuf */ + m->tso_segsz = hdr->gso_size; + switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + case VIRTIO_NET_HDR_GSO_TCPV6: + m->ol_flags |= PKT_RX_LRO | \ + PKT_RX_L4_CKSUM_NONE; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +#define VIRTIO_MBUF_BURST_SZ 64 +#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) +uint16_t +virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; + struct virtio_hw *hw = vq->hw; + struct rte_mbuf *rxm; + uint16_t nb_used, num, nb_rx; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; + int error; + uint32_t i, nb_enqueued; + uint32_t hdr_size; + struct virtio_net_hdr *hdr; + + nb_rx = 0; + if (unlikely(hw->started == 0)) + return nb_rx; + + nb_used = VIRTQUEUE_NUSED(vq); + + virtio_rmb(hw->weak_barriers); + + num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts; + if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) + num = VIRTIO_MBUF_BURST_SZ; + if (likely(num > DESC_PER_CACHELINE)) + num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); + PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); + + nb_enqueued = 0; + hdr_size = hw->vtnet_hdr_size; + + for (i = 0; i < num ; i++) { + rxm = rcv_pkts[i]; + + PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); + + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop"); + nb_enqueued++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + + rxm->port = rxvq->port_id; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); + + hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + + RTE_PKTMBUF_HEADROOM - hdr_size); + + if (hw->vlan_strip) + rte_vlan_strip(rxm); + + if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) { + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + + virtio_rx_stats_updated(rxvq, rxm); + + rx_pkts[nb_rx++] = rxm; + } + + rxvq->stats.packets += nb_rx; + + /* Allocate new mbuf for the used descriptor */ + if (likely(!virtqueue_full(vq))) { + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, + free_cnt) == 0)) { + error = virtqueue_enqueue_recv_refill(vq, new_pkts, + free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + struct rte_eth_dev *dev = + &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed += free_cnt; + } + } + + if 
(likely(nb_enqueued)) { + vq_update_avail_idx(vq); + + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); + } + } + + return nb_rx; +} + +uint16_t +virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; + struct virtio_hw *hw = vq->hw; + struct rte_mbuf *rxm; + uint16_t num, nb_rx; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; + int error; + uint32_t i, nb_enqueued; + uint32_t hdr_size; + struct virtio_net_hdr *hdr; + + nb_rx = 0; + if (unlikely(hw->started == 0)) + return nb_rx; + + num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts); + if (likely(num > DESC_PER_CACHELINE)) + num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); + PMD_RX_LOG(DEBUG, "dequeue:%d", num); + + nb_enqueued = 0; + hdr_size = hw->vtnet_hdr_size; + + for (i = 0; i < num; i++) { + rxm = rcv_pkts[i]; + + PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); + + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop"); + nb_enqueued++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + + rxm->port = rxvq->port_id; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); + + hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + + RTE_PKTMBUF_HEADROOM - hdr_size); + + if (hw->vlan_strip) + rte_vlan_strip(rxm); + + if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) { + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + + virtio_rx_stats_updated(rxvq, rxm); + + rx_pkts[nb_rx++] = rxm; + } + + rxvq->stats.packets += nb_rx; + + /* Allocate new mbuf for the used descriptor */ + if (likely(!virtqueue_full(vq))) { + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, + free_cnt) == 0)) { + error = virtqueue_enqueue_recv_refill_packed(vq, + new_pkts, free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + struct rte_eth_dev *dev = + &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed += free_cnt; + } + } + + if (likely(nb_enqueued)) { + if (unlikely(virtqueue_kick_prepare_packed(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); + } + } + + return nb_rx; +} + + +uint16_t +virtio_recv_pkts_inorder(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; + struct virtio_hw *hw = vq->hw; + struct rte_mbuf *rxm; + struct rte_mbuf *prev; + uint16_t nb_used, num, nb_rx; + uint32_t len[VIRTIO_MBUF_BURST_SZ]; + struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; + int error; + uint32_t nb_enqueued; + uint32_t seg_num; + uint32_t seg_res; + uint32_t hdr_size; + int32_t i; + + nb_rx = 0; + if (unlikely(hw->started == 0)) + return nb_rx; + + nb_used = VIRTQUEUE_NUSED(vq); + nb_used = RTE_MIN(nb_used, nb_pkts); + nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ); + + virtio_rmb(hw->weak_barriers); + + PMD_RX_LOG(DEBUG, "used:%d", nb_used); + + nb_enqueued = 0; + seg_num = 1; + seg_res = 0; + hdr_size = hw->vtnet_hdr_size; + + num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used); + + 
for (i = 0; i < num; i++) { + struct virtio_net_hdr_mrg_rxbuf *header; + + PMD_RX_LOG(DEBUG, "dequeue:%d", num); + PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); + + rxm = rcv_pkts[i]; + + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop"); + nb_enqueued++; + virtio_discard_rxbuf_inorder(vq, rxm); + rxvq->stats.errors++; + continue; + } + + header = (struct virtio_net_hdr_mrg_rxbuf *) + ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM + - hdr_size); + + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { + seg_num = header->num_buffers; + if (seg_num == 0) + seg_num = 1; + } else { + seg_num = 1; + } + + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->nb_segs = seg_num; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); + + rxm->port = rxvq->port_id; + + rx_pkts[nb_rx] = rxm; + prev = rxm; + + if (vq->hw->has_rx_offload && + virtio_rx_offload(rxm, &header->hdr) < 0) { + virtio_discard_rxbuf_inorder(vq, rxm); + rxvq->stats.errors++; + continue; + } + + if (hw->vlan_strip) + rte_vlan_strip(rx_pkts[nb_rx]); + + seg_res = seg_num - 1; + + /* Merge remaining segments */ + while (seg_res != 0 && i < (num - 1)) { + i++; + + rxm = rcv_pkts[i]; + rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->pkt_len = (uint32_t)(len[i]); + rxm->data_len = (uint16_t)(len[i]); + + rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); + rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]); + + if (prev) + prev->next = rxm; + + prev = rxm; + seg_res -= 1; + } + + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + } + + /* Last packet still need merge segments */ + while (seg_res != 0) { + uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, + VIRTIO_MBUF_BURST_SZ); + + prev = rcv_pkts[nb_rx]; + if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { + virtio_rmb(hw->weak_barriers); + num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, + rcv_cnt); + uint16_t extra_idx = 0; + + rcv_cnt = num; + while (extra_idx < rcv_cnt) { + rxm = rcv_pkts[extra_idx]; + rxm->data_off = + RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->pkt_len = (uint32_t)(len[extra_idx]); + rxm->data_len = (uint16_t)(len[extra_idx]); + prev->next = rxm; + prev = rxm; + rx_pkts[nb_rx]->pkt_len += len[extra_idx]; + rx_pkts[nb_rx]->data_len += len[extra_idx]; + extra_idx += 1; + }; + seg_res -= rcv_cnt; + + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + } else { + PMD_RX_LOG(ERR, + "No enough segments for packet."); + virtio_discard_rxbuf_inorder(vq, prev); + rxvq->stats.errors++; + break; + } + } -static void -virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) -{ - uint32_t s = mbuf->pkt_len; - struct ether_addr *ea; + rxvq->stats.packets += nb_rx; - if (s == 64) { - vq->size_bins[1]++; - } else if (s > 64 && s < 1024) { - uint32_t bin; + /* Allocate new mbuf for the used descriptor */ - /* count zeros, and offset into correct bin */ - bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; - vq->size_bins[bin]++; - } else { - if (s < 64) - vq->size_bins[0]++; - else if (s < 1519) - vq->size_bins[6]++; - else if (s >= 1519) - vq->size_bins[7]++; + if (likely(!virtqueue_full(vq))) { + /* free_cnt may include mrg descs */ + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { + error = virtqueue_enqueue_refill_inorder(vq, new_pkts, + free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) 
+ rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + struct rte_eth_dev *dev = + &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed += free_cnt; + } + } + + if (likely(nb_enqueued)) { + vq_update_avail_idx(vq); + + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); + } } - ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *); - vq->multicast += is_multicast_ether_addr(ea); - vq->broadcast += is_broadcast_ether_addr(ea); + return nb_rx; } -#define VIRTIO_MBUF_BURST_SZ 64 -#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc)) uint16_t -virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +virtio_recv_mergeable_pkts(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; - struct virtio_hw *hw; - struct rte_mbuf *rxm, *new_mbuf; - uint16_t nb_used, num, nb_rx; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; + struct virtio_hw *hw = vq->hw; + struct rte_mbuf *rxm; + struct rte_mbuf *prev; + uint16_t nb_used, num, nb_rx = 0; uint32_t len[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; int error; - uint32_t i, nb_enqueued; - const uint32_t hdr_size = sizeof(struct virtio_net_hdr); + uint32_t nb_enqueued = 0; + uint32_t seg_num = 0; + uint32_t seg_res = 0; + uint32_t hdr_size = hw->vtnet_hdr_size; + int32_t i; - nb_used = VIRTQUEUE_NUSED(rxvq); + if (unlikely(hw->started == 0)) + return nb_rx; - virtio_rmb(); + nb_used = VIRTQUEUE_NUSED(vq); - num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); - num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ); - if (likely(num > DESC_PER_CACHELINE)) - num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + virtio_rmb(hw->weak_barriers); - if (num == 0) - return 0; + PMD_RX_LOG(DEBUG, "used:%d", nb_used); - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num); - PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); + num = likely(nb_used <= nb_pkts) ? 
nb_used : nb_pkts; + if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) + num = VIRTIO_MBUF_BURST_SZ; + if (likely(num > DESC_PER_CACHELINE)) + num = num - ((vq->vq_used_cons_idx + num) % + DESC_PER_CACHELINE); - hw = rxvq->hw; - nb_rx = 0; - nb_enqueued = 0; - for (i = 0; i < num ; i++) { - rxm = rcv_pkts[i]; + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); + + for (i = 0; i < num; i++) { + struct virtio_net_hdr_mrg_rxbuf *header; + PMD_RX_LOG(DEBUG, "dequeue:%d", num); PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); - if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { + rxm = rcv_pkts[i]; + + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } - rxm->port = rxvq->port_id; - rxm->data_off = RTE_PKTMBUF_HEADROOM; + header = (struct virtio_net_hdr_mrg_rxbuf *) + ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM + - hdr_size); + seg_num = header->num_buffers; + if (seg_num == 0) + seg_num = 1; - rxm->nb_segs = 1; - rxm->next = NULL; + rxm->data_off = RTE_PKTMBUF_HEADROOM; + rxm->nb_segs = seg_num; + rxm->ol_flags = 0; + rxm->vlan_tci = 0; rxm->pkt_len = (uint32_t)(len[i] - hdr_size); rxm->data_len = (uint16_t)(len[i] - hdr_size); + rxm->port = rxvq->port_id; + + rx_pkts[nb_rx] = rxm; + prev = rxm; + + if (hw->has_rx_offload && + virtio_rx_offload(rxm, &header->hdr) < 0) { + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + if (hw->vlan_strip) - rte_vlan_strip(rxm); + rte_vlan_strip(rx_pkts[nb_rx]); + + seg_res = seg_num - 1; - VIRTIO_DUMP_PACKET(rxm, rxm->data_len); + /* Merge remaining segments */ + while (seg_res != 0 && i < (num - 1)) { + i++; - rx_pkts[nb_rx++] = rxm; + rxm = rcv_pkts[i]; + rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->pkt_len = (uint32_t)(len[i]); + rxm->data_len = (uint16_t)(len[i]); - rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len; - virtio_update_packet_stats(rxvq, rxm); + rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); + rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]); + + if (prev) + prev->next = rxm; + + prev = rxm; + seg_res -= 1; + } + + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } } - rxvq->packets += nb_rx; + /* Last packet still need merge segments */ + while (seg_res != 0) { + uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, + VIRTIO_MBUF_BURST_SZ); - /* Allocate new mbuf for the used descriptor */ - error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { - new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); - if (unlikely(new_mbuf == NULL)) { - struct rte_eth_dev *dev - = &rte_eth_devices[rxvq->port_id]; - dev->data->rx_mbuf_alloc_failed++; + prev = rcv_pkts[nb_rx]; + if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { + virtio_rmb(hw->weak_barriers); + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, + rcv_cnt); + uint16_t extra_idx = 0; + + rcv_cnt = num; + while (extra_idx < rcv_cnt) { + rxm = rcv_pkts[extra_idx]; + rxm->data_off = + RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->pkt_len = (uint32_t)(len[extra_idx]); + rxm->data_len = (uint16_t)(len[extra_idx]); + prev->next = rxm; + prev = rxm; + rx_pkts[nb_rx]->pkt_len += len[extra_idx]; + rx_pkts[nb_rx]->data_len += len[extra_idx]; + extra_idx += 1; + }; + seg_res -= rcv_cnt; + + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + } else { + PMD_RX_LOG(ERR, + "No enough segments for packet."); + virtio_discard_rxbuf(vq, prev); + rxvq->stats.errors++; break; } - error = 
virtqueue_enqueue_recv_refill(rxvq, new_mbuf); - if (unlikely(error)) { - rte_pktmbuf_free(new_mbuf); - break; + } + + rxvq->stats.packets += nb_rx; + + /* Allocate new mbuf for the used descriptor */ + if (likely(!virtqueue_full(vq))) { + /* free_cnt may include mrg descs */ + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { + error = virtqueue_enqueue_recv_refill(vq, new_pkts, + free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + struct rte_eth_dev *dev = + &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed += free_cnt; } - nb_enqueued++; } if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); - PMD_RX_LOG(DEBUG, "Notified\n"); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); } } @@ -661,68 +1793,55 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } uint16_t -virtio_recv_mergeable_pkts(void *rx_queue, +virtio_recv_mergeable_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; - struct virtio_hw *hw; - struct rte_mbuf *rxm, *new_mbuf; - uint16_t nb_used, num, nb_rx; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; + struct virtio_hw *hw = vq->hw; + struct rte_mbuf *rxm; + struct rte_mbuf *prev = NULL; + uint16_t num, nb_rx = 0; uint32_t len[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; - struct rte_mbuf *prev; + uint32_t nb_enqueued = 0; + uint32_t seg_num = 0; + uint32_t seg_res = 0; + uint32_t hdr_size = hw->vtnet_hdr_size; + int32_t i; int error; - uint32_t i, nb_enqueued; - uint32_t seg_num; - uint16_t extra_idx; - uint32_t seg_res; - const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); - nb_used = VIRTQUEUE_NUSED(rxvq); + if (unlikely(hw->started == 0)) + return nb_rx; - virtio_rmb(); - - if (nb_used == 0) - return 0; - PMD_RX_LOG(DEBUG, "used:%d\n", nb_used); + num = nb_pkts; + if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) + num = VIRTIO_MBUF_BURST_SZ; + if (likely(num > DESC_PER_CACHELINE)) + num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); - hw = rxvq->hw; - nb_rx = 0; - i = 0; - nb_enqueued = 0; - seg_num = 0; - extra_idx = 0; - seg_res = 0; + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); - while (i < nb_used) { + for (i = 0; i < num; i++) { struct virtio_net_hdr_mrg_rxbuf *header; - if (nb_rx == nb_pkts) - break; - - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1); - if (num != 1) - continue; - - i++; - - PMD_RX_LOG(DEBUG, "dequeue:%d\n", num); - PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]); + PMD_RX_LOG(DEBUG, "dequeue:%d", num); + PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); - rxm = rcv_pkts[0]; + rxm = rcv_pkts[i]; - if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) { - PMD_RX_LOG(ERR, "Packet drop\n"); + if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) { + PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } - header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr + - RTE_PKTMBUF_HEADROOM - hdr_size); + header = (struct virtio_net_hdr_mrg_rxbuf *)((char *) + rxm->buf_addr + RTE_PKTMBUF_HEADROOM - 
hdr_size); seg_num = header->num_buffers; if (seg_num == 0) @@ -730,182 +1849,420 @@ virtio_recv_mergeable_pkts(void *rx_queue, rxm->data_off = RTE_PKTMBUF_HEADROOM; rxm->nb_segs = seg_num; - rxm->next = NULL; - rxm->pkt_len = (uint32_t)(len[0] - hdr_size); - rxm->data_len = (uint16_t)(len[0] - hdr_size); + rxm->ol_flags = 0; + rxm->vlan_tci = 0; + rxm->pkt_len = (uint32_t)(len[i] - hdr_size); + rxm->data_len = (uint16_t)(len[i] - hdr_size); rxm->port = rxvq->port_id; rx_pkts[nb_rx] = rxm; prev = rxm; + if (hw->has_rx_offload && + virtio_rx_offload(rxm, &header->hdr) < 0) { + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; + continue; + } + + if (hw->vlan_strip) + rte_vlan_strip(rx_pkts[nb_rx]); + seg_res = seg_num - 1; - while (seg_res != 0) { - /* - * Get extra segments for current uncompleted packet. - */ - uint16_t rcv_cnt = - RTE_MIN(seg_res, RTE_DIM(rcv_pkts)); - if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) { - uint32_t rx_num = - virtqueue_dequeue_burst_rx(rxvq, - rcv_pkts, len, rcv_cnt); - i += rx_num; - rcv_cnt = rx_num; - } else { - PMD_RX_LOG(ERR, - "No enough segments for packet.\n"); - nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; - break; - } + /* Merge remaining segments */ + while (seg_res != 0 && i < (num - 1)) { + i++; + + rxm = rcv_pkts[i]; + rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; + rxm->pkt_len = (uint32_t)(len[i]); + rxm->data_len = (uint16_t)(len[i]); + + rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); + rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]); - extra_idx = 0; + if (prev) + prev->next = rxm; + + prev = rxm; + seg_res -= 1; + } + + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + } + + /* Last packet still need merge segments */ + while (seg_res != 0) { + uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, + VIRTIO_MBUF_BURST_SZ); + if (likely(vq->vq_free_cnt >= rcv_cnt)) { + num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, + len, rcv_cnt); + uint16_t extra_idx = 0; + + rcv_cnt = num; while (extra_idx < rcv_cnt) { rxm = rcv_pkts[extra_idx]; - rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; - rxm->next = NULL; + rxm->data_off = + RTE_PKTMBUF_HEADROOM - hdr_size; rxm->pkt_len = (uint32_t)(len[extra_idx]); rxm->data_len = (uint16_t)(len[extra_idx]); - if (prev) - prev->next = rxm; - + prev->next = rxm; prev = rxm; - rx_pkts[nb_rx]->pkt_len += rxm->pkt_len; - extra_idx++; - }; + rx_pkts[nb_rx]->pkt_len += len[extra_idx]; + rx_pkts[nb_rx]->data_len += len[extra_idx]; + extra_idx += 1; + } seg_res -= rcv_cnt; + if (!seg_res) { + virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); + nb_rx++; + } + } else { + PMD_RX_LOG(ERR, + "No enough segments for packet."); + if (prev) + virtio_discard_rxbuf(vq, prev); + rxvq->stats.errors++; + break; } + } - if (hw->vlan_strip) - rte_vlan_strip(rx_pkts[nb_rx]); + rxvq->stats.packets += nb_rx; - VIRTIO_DUMP_PACKET(rx_pkts[nb_rx], - rx_pkts[nb_rx]->data_len); + /* Allocate new mbuf for the used descriptor */ + if (likely(!virtqueue_full(vq))) { + /* free_cnt may include mrg descs */ + uint16_t free_cnt = vq->vq_free_cnt; + struct rte_mbuf *new_pkts[free_cnt]; + + if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { + error = virtqueue_enqueue_recv_refill_packed(vq, + new_pkts, free_cnt); + if (unlikely(error)) { + for (i = 0; i < free_cnt; i++) + rte_pktmbuf_free(new_pkts[i]); + } + nb_enqueued += free_cnt; + } else { + struct rte_eth_dev *dev = + &rte_eth_devices[rxvq->port_id]; + dev->data->rx_mbuf_alloc_failed += free_cnt; + } + } - rxvq->bytes += 
rx_pkts[nb_rx]->pkt_len; - virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]); - nb_rx++; + if (likely(nb_enqueued)) { + if (unlikely(virtqueue_kick_prepare_packed(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); + } } - rxvq->packets += nb_rx; + return nb_rx; +} - /* Allocate new mbuf for the used descriptor */ - error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { - new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); - if (unlikely(new_mbuf == NULL)) { - struct rte_eth_dev *dev - = &rte_eth_devices[rxvq->port_id]; - dev->data->rx_mbuf_alloc_failed++; - break; +uint16_t +virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; + struct virtio_hw *hw = vq->hw; + uint16_t hdr_size = hw->vtnet_hdr_size; + uint16_t nb_tx = 0; + bool in_order = hw->use_inorder_tx; + int error; + + if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) + return nb_tx; + + if (unlikely(nb_pkts < 1)) + return nb_pkts; + + PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); + + if (nb_pkts > vq->vq_free_cnt) + virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt, + in_order); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]; + int can_push = 0, slots, need; + + /* Do VLAN tag insertion */ + if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { + error = rte_vlan_insert(&txm); + if (unlikely(error)) { + rte_pktmbuf_free(txm); + continue; + } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; } - error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); - if (unlikely(error)) { - rte_pktmbuf_free(new_mbuf); - break; + + /* optimize ring usage */ + if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || + vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && + rte_mbuf_refcnt_read(txm) == 1 && + RTE_MBUF_DIRECT(txm) && + txm->nb_segs == 1 && + rte_pktmbuf_headroom(txm) >= hdr_size && + rte_is_aligned(rte_pktmbuf_mtod(txm, char *), + __alignof__(struct virtio_net_hdr_mrg_rxbuf))) + can_push = 1; + + /* How many main ring entries are needed to this Tx? 
+ * any_layout => number of segments + * default => number of segments + 1 + */ + slots = txm->nb_segs + !can_push; + need = slots - vq->vq_free_cnt; + + /* Positive value indicates it need free vring descriptors */ + if (unlikely(need > 0)) { + virtio_xmit_cleanup_packed(vq, need, in_order); + need = slots - vq->vq_free_cnt; + if (unlikely(need > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to transmit"); + break; + } } - nb_enqueued++; + + /* Enqueue Packet buffers */ + if (can_push) + virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order); + else + virtqueue_enqueue_xmit_packed(txvq, txm, slots, 0, + in_order); + + virtio_update_packet_stats(&txvq->stats, txm); } - if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + txvq->stats.packets += nb_tx; - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); - PMD_RX_LOG(DEBUG, "Notified"); + if (likely(nb_tx)) { + if (unlikely(virtqueue_kick_prepare_packed(vq))) { + virtqueue_notify(vq); + PMD_TX_LOG(DEBUG, "Notified backend after xmit"); } } - return nb_rx; + return nb_tx; } uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct virtqueue *txvq = tx_queue; - struct rte_mbuf *txm; - uint16_t nb_used, nb_tx; + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; + struct virtio_hw *hw = vq->hw; + uint16_t hdr_size = hw->vtnet_hdr_size; + uint16_t nb_used, nb_tx = 0; int error; + if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) + return nb_tx; + if (unlikely(nb_pkts < 1)) return nb_pkts; PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); - virtio_rmb(); - if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh)) - virtio_xmit_cleanup(txvq, nb_used); + virtio_rmb(hw->weak_barriers); + if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) + virtio_xmit_cleanup(vq, nb_used); - nb_tx = 0; + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]; + int can_push = 0, use_indirect = 0, slots, need; + + /* Do VLAN tag insertion */ + if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { + error = rte_vlan_insert(&txm); + if (unlikely(error)) { + rte_pktmbuf_free(txm); + continue; + } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; + } - while (nb_tx < nb_pkts) { - /* Need one more descriptor for virtio header. */ - int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1; + /* optimize ring usage */ + if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || + vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && + rte_mbuf_refcnt_read(txm) == 1 && + RTE_MBUF_DIRECT(txm) && + txm->nb_segs == 1 && + rte_pktmbuf_headroom(txm) >= hdr_size && + rte_is_aligned(rte_pktmbuf_mtod(txm, char *), + __alignof__(struct virtio_net_hdr_mrg_rxbuf))) + can_push = 1; + else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) && + txm->nb_segs < VIRTIO_MAX_TX_INDIRECT) + use_indirect = 1; + + /* How many main ring entries are needed to this Tx? + * any_layout => number of segments + * indirect => 1 + * default => number of segments + 1 + */ + slots = use_indirect ? 
1 : (txm->nb_segs + !can_push); + need = slots - vq->vq_free_cnt; - /*Positive value indicates it need free vring descriptors */ + /* Positive value indicates it need free vring descriptors */ if (unlikely(need > 0)) { - nb_used = VIRTQUEUE_NUSED(txvq); - virtio_rmb(); + nb_used = VIRTQUEUE_NUSED(vq); + virtio_rmb(hw->weak_barriers); need = RTE_MIN(need, (int)nb_used); - virtio_xmit_cleanup(txvq, need); - need = (int)tx_pkts[nb_tx]->nb_segs - - txvq->vq_free_cnt + 1; + virtio_xmit_cleanup(vq, need); + need = slots - vq->vq_free_cnt; + if (unlikely(need > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to transmit"); + break; + } } - /* - * Zero or negative value indicates it has enough free - * descriptors to use for transmitting. - */ - if (likely(need <= 0)) { - txm = tx_pkts[nb_tx]; + /* Enqueue Packet buffers */ + virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, + can_push, 0); - /* Do VLAN tag insertion */ - if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { - error = rte_vlan_insert(&txm); - if (unlikely(error)) { - rte_pktmbuf_free(txm); - ++nb_tx; - continue; - } - } + virtio_update_packet_stats(&txvq->stats, txm); + } + + txvq->stats.packets += nb_tx; + + if (likely(nb_tx)) { + vq_update_avail_idx(vq); + + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_TX_LOG(DEBUG, "Notified backend after xmit"); + } + } + + return nb_tx; +} + +uint16_t +virtio_xmit_pkts_inorder(void *tx_queue, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; + struct virtio_hw *hw = vq->hw; + uint16_t hdr_size = hw->vtnet_hdr_size; + uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0; + struct rte_mbuf *inorder_pkts[nb_pkts]; + int error; + + if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) + return nb_tx; + + if (unlikely(nb_pkts < 1)) + return nb_pkts; + + VIRTQUEUE_DUMP(vq); + PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); + nb_used = VIRTQUEUE_NUSED(vq); + + virtio_rmb(hw->weak_barriers); + if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) + virtio_xmit_cleanup_inorder(vq, nb_used); + + if (unlikely(!vq->vq_free_cnt)) + virtio_xmit_cleanup_inorder(vq, nb_used); + + nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts); - /* Enqueue Packet buffers */ - error = virtqueue_enqueue_xmit(txvq, txm); + for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]; + int slots, need; + + /* Do VLAN tag insertion */ + if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { + error = rte_vlan_insert(&txm); if (unlikely(error)) { - if (error == ENOSPC) - PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0"); - else if (error == EMSGSIZE) - PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1"); - else - PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error); + rte_pktmbuf_free(txm); + continue; + } + /* vlan_insert may add a header mbuf */ + tx_pkts[nb_tx] = txm; + } + + /* optimize ring usage */ + if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || + vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && + rte_mbuf_refcnt_read(txm) == 1 && + RTE_MBUF_DIRECT(txm) && + txm->nb_segs == 1 && + rte_pktmbuf_headroom(txm) >= hdr_size && + rte_is_aligned(rte_pktmbuf_mtod(txm, char *), + __alignof__(struct virtio_net_hdr_mrg_rxbuf))) { + inorder_pkts[nb_inorder_pkts] = txm; + nb_inorder_pkts++; + + virtio_update_packet_stats(&txvq->stats, txm); + continue; + } + + if (nb_inorder_pkts) { + virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, + nb_inorder_pkts); + nb_inorder_pkts = 0; + } + + 
slots = txm->nb_segs + 1; + need = slots - vq->vq_free_cnt; + if (unlikely(need > 0)) { + nb_used = VIRTQUEUE_NUSED(vq); + virtio_rmb(hw->weak_barriers); + need = RTE_MIN(need, (int)nb_used); + + virtio_xmit_cleanup_inorder(vq, need); + + need = slots - vq->vq_free_cnt; + + if (unlikely(need > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to transmit"); break; } - nb_tx++; - txvq->bytes += txm->pkt_len; - virtio_update_packet_stats(txvq, txm); - } else { - PMD_TX_LOG(ERR, "No free tx descriptors to transmit"); - break; } + /* Enqueue Packet buffers */ + virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1); + + virtio_update_packet_stats(&txvq->stats, txm); } - txvq->packets += nb_tx; + /* Transmit all inorder packets */ + if (nb_inorder_pkts) + virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, + nb_inorder_pkts); + + txvq->stats.packets += nb_tx; if (likely(nb_tx)) { - vq_update_avail_idx(txvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(txvq))) { - virtqueue_notify(txvq); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); PMD_TX_LOG(DEBUG, "Notified backend after xmit"); } } + VIRTQUEUE_DUMP(vq); + return nb_tx; }
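The comment added in virtio_xmit_pkts() and virtio_xmit_pkts_packed() above encodes the per-packet slot math: one ring entry when an indirect descriptor carries the chain, nb_segs entries when the virtio header can be pushed into the mbuf headroom (any_layout / version 1), and nb_segs + 1 otherwise, with a positive need = slots - vq_free_cnt meaning used descriptors must be reclaimed before enqueueing. Below is a minimal standalone sketch of that arithmetic only; tx_slots_needed() and the local variables are simplified stand-ins for illustration, not driver symbols, and the code is not part of this patch.

/* Sketch of the Tx slot accounting described in the driver comment.
 * tx_slots_needed() is an illustrative helper, not a DPDK function. */
#include <stdio.h>
#include <stdbool.h>

static int
tx_slots_needed(int nb_segs, bool can_push, bool use_indirect)
{
	if (use_indirect)
		return 1;		/* whole chain goes via one indirect descriptor */
	return can_push ? nb_segs	/* header pushed into mbuf headroom */
			: nb_segs + 1;	/* extra slot for the separate virtio header */
}

int
main(void)
{
	int vq_free_cnt = 2;		/* free ring entries (example value) */
	int slots = tx_slots_needed(3 /* nb_segs */, false, false);
	int need = slots - vq_free_cnt;

	/* need > 0 means virtio_xmit_cleanup*() must reclaim entries first */
	printf("slots=%d need=%d cleanup=%s\n", slots, need,
	       need > 0 ? "yes" : "no");
	return 0;
}

As the in-order path above shows, virtio_xmit_pkts_inorder() uses the nb_segs + 1 case (slots = txm->nb_segs + 1) whenever a packet does not qualify for the can_push fast path, while qualifying single-segment packets are batched separately through virtqueue_enqueue_xmit_inorder().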