X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=0243573a35d86b54aef3256dd7ff9ee79a28dfcf;hb=d1eafb5322682635599e78d4f04e4bfee27e7edc;hp=f8732dfec7b960df7b1ceab71c51b32214470ed9;hpb=e5c494a7a22b6db986ac64a6ca85d3fe5e4418d0;p=dpdk.git diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index f8732dfec7..0243573a35 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include @@ -44,13 +15,22 @@ #include #include #include +#include +#include +#include "iotlb.h" #include "vhost.h" #define MAX_PKT_BURST 32 #define MAX_BATCH_LEN 256 +static __rte_always_inline bool +rxvq_is_mergeable(struct virtio_net *dev) +{ + return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF); +} + static bool is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) { @@ -58,53 +38,312 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) } static __rte_always_inline void -do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint16_t to, uint16_t from, uint16_t size) +do_flush_shadow_used_ring_split(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint16_t to, uint16_t from, uint16_t size) { rte_memcpy(&vq->used->ring[to], - &vq->shadow_used_ring[from], + &vq->shadow_used_split[from], size * sizeof(struct vring_used_elem)); - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[to]), size * sizeof(struct vring_used_elem)); } static __rte_always_inline void -flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) +flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t used_idx = vq->last_used_idx & (vq->size - 1); if (used_idx + vq->shadow_used_idx <= vq->size) { - do_flush_shadow_used_ring(dev, vq, used_idx, 0, + do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, vq->shadow_used_idx); } else { uint16_t size; /* update used ring interval [used_idx, vq->size] */ size = vq->size - used_idx; - do_flush_shadow_used_ring(dev, vq, used_idx, 0, size); + do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size); /* update the left half used ring interval [0, left_size] */ - do_flush_shadow_used_ring(dev, vq, 0, size, + do_flush_shadow_used_ring_split(dev, vq, 0, size, vq->shadow_used_idx - size); } vq->last_used_idx += vq->shadow_used_idx; rte_smp_wmb(); + vhost_log_cache_sync(dev, vq); + *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx; + vq->shadow_used_idx = 0; vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), sizeof(vq->used->idx)); } static __rte_always_inline void -update_shadow_used_ring(struct vhost_virtqueue *vq, - uint16_t desc_idx, uint16_t len) +update_shadow_used_ring_split(struct vhost_virtqueue *vq, + uint16_t desc_idx, uint32_t len) +{ + uint16_t i = vq->shadow_used_idx++; + + vq->shadow_used_split[i].id = desc_idx; + vq->shadow_used_split[i].len = len; +} + +static __rte_always_inline void +vhost_flush_enqueue_shadow_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + int i; + uint16_t used_idx = vq->last_used_idx; + uint16_t head_idx = vq->last_used_idx; + uint16_t head_flags = 0; + + /* Split loop in two to save memory barriers */ + for (i = 0; i < vq->shadow_used_idx; i++) { + vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id; + vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len; + + used_idx += vq->shadow_used_packed[i].count; + if (used_idx >= vq->size) + used_idx -= vq->size; + } + + rte_smp_wmb(); + + for (i = 0; i < vq->shadow_used_idx; i++) { + uint16_t flags; + + if (vq->shadow_used_packed[i].len) + flags = VRING_DESC_F_WRITE; + else + flags = 0; + + if (vq->used_wrap_counter) { + flags |= VRING_DESC_F_USED; + flags |= VRING_DESC_F_AVAIL; + } else { + flags &= ~VRING_DESC_F_USED; + flags &= ~VRING_DESC_F_AVAIL; + } + + if (i > 0) { + 
vq->desc_packed[vq->last_used_idx].flags = flags; + + vhost_log_cache_used_vring(dev, vq, + vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + } else { + head_idx = vq->last_used_idx; + head_flags = flags; + } + + vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count); + } + + vq->desc_packed[head_idx].flags = head_flags; + + vhost_log_cache_used_vring(dev, vq, + head_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); +} + +static __rte_always_inline void +vhost_flush_dequeue_shadow_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0]; + + vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id; + rte_smp_wmb(); + vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags; + + vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); +} + +static __rte_always_inline void +vhost_flush_enqueue_batch_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint64_t *lens, + uint16_t *ids) +{ + uint16_t i; + uint16_t flags; + + flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + vq->desc_packed[vq->last_used_idx + i].id = ids[i]; + vq->desc_packed[vq->last_used_idx + i].len = lens[i]; + } + + rte_smp_wmb(); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + vq->desc_packed[vq->last_used_idx + i].flags = flags; + + vhost_log_cache_used_vring(dev, vq, vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc) * + PACKED_BATCH_SIZE); + vhost_log_cache_sync(dev, vq); + + vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE); +} + +static __rte_always_inline void +flush_shadow_used_ring_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + int i; + uint16_t used_idx = vq->last_used_idx; + uint16_t head_idx = vq->last_used_idx; + uint16_t head_flags = 0; + + /* Split loop in two to save memory barriers */ + for (i = 0; i < vq->shadow_used_idx; i++) { + vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id; + vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len; + + used_idx += vq->shadow_used_packed[i].count; + if (used_idx >= vq->size) + used_idx -= vq->size; + } + + for (i = 0; i < vq->shadow_used_idx; i++) { + uint16_t flags; + + if (vq->shadow_used_packed[i].len) + flags = VRING_DESC_F_WRITE; + else + flags = 0; + + if (vq->used_wrap_counter) { + flags |= VRING_DESC_F_USED; + flags |= VRING_DESC_F_AVAIL; + } else { + flags &= ~VRING_DESC_F_USED; + flags &= ~VRING_DESC_F_AVAIL; + } + + if (i > 0) { + vq->desc_packed[vq->last_used_idx].flags = flags; + + vhost_log_cache_used_vring(dev, vq, + vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + } else { + head_idx = vq->last_used_idx; + head_flags = flags; + } + + vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count); + } + + __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags, + __ATOMIC_RELEASE); + + vhost_log_cache_used_vring(dev, vq, + head_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); +} + +static __rte_always_inline void +vhost_shadow_dequeue_batch_packed(struct virtio_net *dev, + struct 
vhost_virtqueue *vq, + uint16_t *ids) +{ + uint16_t flags; + uint16_t i; + uint16_t begin; + + flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter); + + if (!vq->shadow_used_idx) { + vq->shadow_last_used_idx = vq->last_used_idx; + vq->shadow_used_packed[0].id = ids[0]; + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].count = 1; + vq->shadow_used_packed[0].flags = flags; + vq->shadow_used_idx++; + begin = 1; + } else + begin = 0; + + vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) { + vq->desc_packed[vq->last_used_idx + i].id = ids[i]; + vq->desc_packed[vq->last_used_idx + i].len = 0; + } + + rte_smp_wmb(); + vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) + vq->desc_packed[vq->last_used_idx + i].flags = flags; + + vhost_log_cache_used_vring(dev, vq, vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc) * + PACKED_BATCH_SIZE); + vhost_log_cache_sync(dev, vq); + + vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE); +} + +static __rte_always_inline void +vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq, + uint16_t buf_id, + uint16_t count) +{ + uint16_t flags; + + flags = vq->desc_packed[vq->last_used_idx].flags; + if (vq->used_wrap_counter) { + flags |= VRING_DESC_F_USED; + flags |= VRING_DESC_F_AVAIL; + } else { + flags &= ~VRING_DESC_F_USED; + flags &= ~VRING_DESC_F_AVAIL; + } + + if (!vq->shadow_used_idx) { + vq->shadow_last_used_idx = vq->last_used_idx; + + vq->shadow_used_packed[0].id = buf_id; + vq->shadow_used_packed[0].len = 0; + vq->shadow_used_packed[0].flags = flags; + vq->shadow_used_idx++; + } else { + vq->desc_packed[vq->last_used_idx].id = buf_id; + vq->desc_packed[vq->last_used_idx].len = 0; + vq->desc_packed[vq->last_used_idx].flags = flags; + } + + vq_inc_last_used_packed(vq, count); +} + +static __rte_always_inline void +update_shadow_used_ring_packed(struct vhost_virtqueue *vq, + uint16_t desc_idx, uint32_t len, uint16_t count) { uint16_t i = vq->shadow_used_idx++; - vq->shadow_used_ring[i].id = desc_idx; - vq->shadow_used_ring[i].len = len; + vq->shadow_used_packed[i].id = desc_idx; + vq->shadow_used_packed[i].len = len; + vq->shadow_used_packed[i].count = count; } static inline void @@ -116,9 +355,12 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) for (i = 0; i < count; i++) { rte_memcpy(elem[i].dst, elem[i].src, elem[i].len); - vhost_log_write(dev, elem[i].log_addr, elem[i].len); + vhost_log_cache_write_iova(dev, vq, elem[i].log_addr, + elem[i].len); PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0); } + + vq->batch_copy_nb_elems = 0; } static inline void @@ -130,6 +372,54 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq) for (i = 0; i < count; i++) rte_memcpy(elem[i].dst, elem[i].src, elem[i].len); + + vq->batch_copy_nb_elems = 0; +} + +static __rte_always_inline void +vhost_shadow_enqueue_single_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint32_t len[], + uint16_t id[], + uint16_t count[], + uint16_t num_buffers) +{ + uint16_t i; + for (i = 0; i < num_buffers; i++) { + /* enqueue shadow flush action aligned with batch num */ + if (!vq->shadow_used_idx) + vq->shadow_aligned_idx = vq->last_used_idx & + PACKED_BATCH_MASK; + vq->shadow_used_packed[vq->shadow_used_idx].id = id[i]; + vq->shadow_used_packed[vq->shadow_used_idx].len = len[i]; + vq->shadow_used_packed[vq->shadow_used_idx].count = count[i]; + vq->shadow_aligned_idx += count[i]; + vq->shadow_used_idx++; + } + + if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) { + 
do_data_copy_enqueue(dev, vq); + vhost_flush_enqueue_shadow_packed(dev, vq); + } +} + +static __rte_unused void +vhost_flush_dequeue_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + int shadow_count; + if (!vq->shadow_used_idx) + return; + + shadow_count = vq->last_used_idx - vq->shadow_last_used_idx; + if (shadow_count <= 0) + shadow_count += vq->size; + + if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) { + do_data_copy_dequeue(vq); + vhost_flush_dequeue_shadow_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } } /* avoid write operation when necessary, to lessen cache issues */ @@ -138,7 +428,7 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq) (var) = (val); \ } while (0) -static void +static __rte_always_inline void virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) { uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK; @@ -152,15 +442,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) switch (csum_l4) { case PKT_TX_TCP_CKSUM: - net_hdr->csum_offset = (offsetof(struct tcp_hdr, + net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr, cksum)); break; case PKT_TX_UDP_CKSUM: - net_hdr->csum_offset = (offsetof(struct udp_hdr, + net_hdr->csum_offset = (offsetof(struct rte_udp_hdr, dgram_cksum)); break; case PKT_TX_SCTP_CKSUM: - net_hdr->csum_offset = (offsetof(struct sctp_hdr, + net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr, cksum)); break; } @@ -172,9 +462,9 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) /* IP cksum verification cannot be bypassed, then calculate here */ if (m_buf->ol_flags & PKT_TX_IP_CKSUM) { - struct ipv4_hdr *ipv4_hdr; + struct rte_ipv4_hdr *ipv4_hdr; - ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *, + ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *, m_buf->l2_len); ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); } @@ -187,6 +477,11 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) net_hdr->gso_size = m_buf->tso_segsz; net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len + m_buf->l4_len; + } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) { + net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; + net_hdr->gso_size = m_buf->tso_segsz; + net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len + + m_buf->l4_len; } else { ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0); ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0); @@ -195,247 +490,104 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) } static __rte_always_inline int -copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, - struct vring_desc *descs, struct rte_mbuf *m, - uint16_t desc_idx, uint32_t size) -{ - uint32_t desc_avail, desc_offset; - uint32_t mbuf_avail, mbuf_offset; - uint32_t cpy_len; - struct vring_desc *desc; - uint64_t desc_addr; - /* A counter to avoid desc dead loop chain */ - uint16_t nr_desc = 1; - struct batch_copy_elem *batch_copy = vq->batch_copy_elems; - uint16_t copy_nb = vq->batch_copy_nb_elems; - int error = 0; - - desc = &descs[desc_idx]; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - /* - * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid - * performance issue with some versions of gcc (4.8.4 and 5.3.0) which - * otherwise stores offset on the stack instead of in a register. 
- */ - if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) { - error = -1; - goto out; - } - - rte_prefetch0((void *)(uintptr_t)desc_addr); - - virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr); - vhost_log_write(dev, desc->addr, dev->vhost_hlen); - PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0); - - desc_offset = dev->vhost_hlen; - desc_avail = desc->len - dev->vhost_hlen; - - mbuf_avail = rte_pktmbuf_data_len(m); - mbuf_offset = 0; - while (mbuf_avail != 0 || m->next != NULL) { - /* done with current mbuf, fetch next */ - if (mbuf_avail == 0) { - m = m->next; - - mbuf_offset = 0; - mbuf_avail = rte_pktmbuf_data_len(m); - } - - /* done with current desc buf, fetch next */ - if (desc_avail == 0) { - if ((desc->flags & VRING_DESC_F_NEXT) == 0) { - /* Room in vring buffer is not enough */ - error = -1; - goto out; - } - if (unlikely(desc->next >= size || ++nr_desc > size)) { - error = -1; - goto out; - } - - desc = &descs[desc->next]; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) { - error = -1; - goto out; - } - - desc_offset = 0; - desc_avail = desc->len; - } - - cpy_len = RTE_MIN(desc_avail, mbuf_avail); - if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) { - rte_memcpy((void *)((uintptr_t)(desc_addr + - desc_offset)), - rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), - cpy_len); - vhost_log_write(dev, desc->addr + desc_offset, cpy_len); - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), - cpy_len, 0); - } else { - batch_copy[copy_nb].dst = - (void *)((uintptr_t)(desc_addr + desc_offset)); - batch_copy[copy_nb].src = - rte_pktmbuf_mtod_offset(m, void *, mbuf_offset); - batch_copy[copy_nb].log_addr = desc->addr + desc_offset; - batch_copy[copy_nb].len = cpy_len; - copy_nb++; - } - - mbuf_avail -= cpy_len; - mbuf_offset += cpy_len; - desc_avail -= cpy_len; - desc_offset += cpy_len; - } - -out: - vq->batch_copy_nb_elems = copy_nb; - - return error; -} - -/** - * This function adds buffers to the virtio devices RX virtqueue. Buffers can - * be received from the physical port or from another virtio device. A packet - * count is returned to indicate the number of packets that are successfully - * added to the RX queue. This function works when the mbuf is scattered, but - * it doesn't support the mergeable feature. 
- */ -static __rte_always_inline uint32_t -virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, - struct rte_mbuf **pkts, uint32_t count) +map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint64_t desc_iova, uint64_t desc_len, uint8_t perm) { - struct vhost_virtqueue *vq; - uint16_t avail_idx, free_entries, start_idx; - uint16_t desc_indexes[MAX_PKT_BURST]; - struct vring_desc *descs; - uint16_t used_idx; - uint32_t i, sz; - - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { - RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, queue_id); - return 0; - } - - vq = dev->virtqueue[queue_id]; - if (unlikely(vq->enabled == 0)) - return 0; + uint16_t vec_id = *vec_idx; - avail_idx = *((volatile uint16_t *)&vq->avail->idx); - start_idx = vq->last_used_idx; - free_entries = avail_idx - start_idx; - count = RTE_MIN(count, free_entries); - count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST); - if (count == 0) - return 0; - - LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n", - dev->vid, start_idx, start_idx + count); - - vq->batch_copy_nb_elems = 0; - - /* Retrieve all of the desc indexes first to avoid caching issues. */ - rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]); - for (i = 0; i < count; i++) { - used_idx = (start_idx + i) & (vq->size - 1); - desc_indexes[i] = vq->avail->ring[used_idx]; - vq->used->ring[used_idx].id = desc_indexes[i]; - vq->used->ring[used_idx].len = pkts[i]->pkt_len + - dev->vhost_hlen; - vhost_log_used_vring(dev, vq, - offsetof(struct vring_used, ring[used_idx]), - sizeof(vq->used->ring[used_idx])); - } + while (desc_len) { + uint64_t desc_addr; + uint64_t desc_chunck_len = desc_len; - rte_prefetch0(&vq->desc[desc_indexes[0]]); - for (i = 0; i < count; i++) { - uint16_t desc_idx = desc_indexes[i]; - int err; + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; - if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) { - descs = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, - vq->desc[desc_idx].addr); - if (unlikely(!descs)) { - count = i; - break; - } + desc_addr = vhost_iova_to_vva(dev, vq, + desc_iova, + &desc_chunck_len, + perm); + if (unlikely(!desc_addr)) + return -1; - desc_idx = 0; - sz = vq->desc[desc_idx].len / sizeof(*descs); - } else { - descs = vq->desc; - sz = vq->size; - } + rte_prefetch0((void *)(uintptr_t)desc_addr); - err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz); - if (unlikely(err)) { - used_idx = (start_idx + i) & (vq->size - 1); - vq->used->ring[used_idx].len = dev->vhost_hlen; - vhost_log_used_vring(dev, vq, - offsetof(struct vring_used, ring[used_idx]), - sizeof(vq->used->ring[used_idx])); - } + buf_vec[vec_id].buf_iova = desc_iova; + buf_vec[vec_id].buf_addr = desc_addr; + buf_vec[vec_id].buf_len = desc_chunck_len; - if (i + 1 < count) - rte_prefetch0(&vq->desc[desc_indexes[i+1]]); + desc_len -= desc_chunck_len; + desc_iova += desc_chunck_len; + vec_id++; } + *vec_idx = vec_id; - do_data_copy_enqueue(dev, vq); - - rte_smp_wmb(); - - *(volatile uint16_t *)&vq->used->idx += count; - vq->last_used_idx += count; - vhost_log_used_vring(dev, vq, - offsetof(struct vring_used, idx), - sizeof(vq->used->idx)); - - /* flush used->idx update before we read avail->flags. */ - rte_mb(); - - /* Kick the guest if necessary. 
*/ - if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) - && (vq->callfd >= 0)) - eventfd_write(vq->callfd, (eventfd_t)1); - return count; + return 0; } static __rte_always_inline int -fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint32_t avail_idx, uint32_t *vec_idx, +fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint32_t avail_idx, uint16_t *vec_idx, struct buf_vector *buf_vec, uint16_t *desc_chain_head, - uint16_t *desc_chain_len) + uint32_t *desc_chain_len, uint8_t perm) { uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)]; - uint32_t vec_id = *vec_idx; + uint16_t vec_id = *vec_idx; uint32_t len = 0; + uint64_t dlen; + uint32_t nr_descs = vq->size; + uint32_t cnt = 0; struct vring_desc *descs = vq->desc; + struct vring_desc *idesc = NULL; + + if (unlikely(idx >= vq->size)) + return -1; *desc_chain_head = idx; if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[idx].len; + nr_descs = dlen / sizeof(struct vring_desc); + if (unlikely(nr_descs > vq->size)) + return -1; + descs = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr); + vhost_iova_to_vva(dev, vq, vq->desc[idx].addr, + &dlen, + VHOST_ACCESS_RO); if (unlikely(!descs)) return -1; + if (unlikely(dlen < vq->desc[idx].len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. + */ + idesc = vhost_alloc_copy_ind_table(dev, vq, + vq->desc[idx].addr, vq->desc[idx].len); + if (unlikely(!idesc)) + return -1; + + descs = idesc; + } + idx = 0; } while (1) { - if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) + if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) { + free_ind_table(idesc); return -1; + } len += descs[idx].len; - buf_vec[vec_id].buf_addr = descs[idx].addr; - buf_vec[vec_id].buf_len = descs[idx].len; - buf_vec[vec_id].desc_idx = idx; - vec_id++; + + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[idx].addr, descs[idx].len, + perm))) { + free_ind_table(idesc); + return -1; + } if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0) break; @@ -446,6 +598,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, *desc_chain_len = len; *vec_idx = vec_id; + if (unlikely(!!idesc)) + free_ind_table(idesc); + return 0; } @@ -453,61 +608,220 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, * Returns -1 on fail, 0 on success */ static inline int -reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, +reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t size, struct buf_vector *buf_vec, - uint16_t *num_buffers, uint16_t avail_head) + uint16_t *num_buffers, uint16_t avail_head, + uint16_t *nr_vec) { uint16_t cur_idx; - uint32_t vec_idx = 0; - uint16_t tries = 0; + uint16_t vec_idx = 0; + uint16_t max_tries, tries = 0; uint16_t head_idx = 0; - uint16_t len = 0; + uint32_t len = 0; *num_buffers = 0; cur_idx = vq->last_avail_idx; + if (rxvq_is_mergeable(dev)) + max_tries = vq->size - 1; + else + max_tries = 1; + while (size > 0) { if (unlikely(cur_idx == avail_head)) return -1; + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal + * happened. 
+ */ + if (unlikely(++tries > max_tries)) + return -1; - if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec, - &head_idx, &len) < 0)) + if (unlikely(fill_vec_buf_split(dev, vq, cur_idx, + &vec_idx, buf_vec, + &head_idx, &len, + VHOST_ACCESS_RW) < 0)) return -1; len = RTE_MIN(len, size); - update_shadow_used_ring(vq, head_idx, len); + update_shadow_used_ring_split(vq, head_idx, len); size -= len; cur_idx++; - tries++; *num_buffers += 1; - - /* - * if we tried all available ring items, and still - * can't get enough buf, it means something abnormal - * happened. - */ - if (unlikely(tries >= vq->size)) - return -1; } + *nr_vec = vec_idx; + return 0; } static __rte_always_inline int -copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, - struct rte_mbuf *m, struct buf_vector *buf_vec, - uint16_t num_buffers) +fill_vec_buf_packed_indirect(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct vring_packed_desc *desc, uint16_t *vec_idx, + struct buf_vector *buf_vec, uint32_t *len, uint8_t perm) { - uint32_t vec_idx = 0; - uint64_t desc_addr; + uint16_t i; + uint32_t nr_descs; + uint16_t vec_id = *vec_idx; + uint64_t dlen; + struct vring_packed_desc *descs, *idescs = NULL; + + dlen = desc->len; + descs = (struct vring_packed_desc *)(uintptr_t) + vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO); + if (unlikely(!descs)) + return -1; + + if (unlikely(dlen < desc->len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. + */ + idescs = vhost_alloc_copy_ind_table(dev, + vq, desc->addr, desc->len); + if (unlikely(!idescs)) + return -1; + + descs = idescs; + } + + nr_descs = desc->len / sizeof(struct vring_packed_desc); + if (unlikely(nr_descs >= vq->size)) { + free_ind_table(idescs); + return -1; + } + + for (i = 0; i < nr_descs; i++) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) { + free_ind_table(idescs); + return -1; + } + + *len += descs[i].len; + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[i].addr, descs[i].len, + perm))) + return -1; + } + *vec_idx = vec_id; + + if (unlikely(!!idescs)) + free_ind_table(idescs); + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint16_t avail_idx, uint16_t *desc_count, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint16_t *buf_id, uint32_t *len, uint8_t perm) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint16_t vec_id = *vec_idx; + + if (avail_idx < vq->last_avail_idx) + wrap_counter ^= 1; + + /* + * Perform a load-acquire barrier in desc_is_avail to + * enforce the ordering between desc flags and desc + * content. 
+ */ + if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter))) + return -1; + + *desc_count = 0; + *len = 0; + + while (1) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; + + if (unlikely(*desc_count >= vq->size)) + return -1; + + *desc_count += 1; + *buf_id = descs[avail_idx].id; + + if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) { + if (unlikely(fill_vec_buf_packed_indirect(dev, vq, + &descs[avail_idx], + &vec_id, buf_vec, + len, perm) < 0)) + return -1; + } else { + *len += descs[avail_idx].len; + + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[avail_idx].addr, + descs[avail_idx].len, + perm))) + return -1; + } + + if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0) + break; + + if (++avail_idx >= vq->size) { + avail_idx -= vq->size; + wrap_counter ^= 1; + } + } + + *vec_idx = vec_id; + + return 0; +} + +static __rte_noinline void +copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct buf_vector *buf_vec, + struct virtio_net_hdr_mrg_rxbuf *hdr) +{ + uint64_t len; + uint64_t remain = dev->vhost_hlen; + uint64_t src = (uint64_t)(uintptr_t)hdr, dst; + uint64_t iova = buf_vec->buf_iova; + + while (remain) { + len = RTE_MIN(remain, + buf_vec->buf_len); + dst = buf_vec->buf_addr; + rte_memcpy((void *)(uintptr_t)dst, + (void *)(uintptr_t)src, + len); + + PRINT_PACKET(dev, (uintptr_t)dst, + (uint32_t)len, 0); + vhost_log_cache_write_iova(dev, vq, + iova, len); + + remain -= len; + iova += len; + src += len; + buf_vec++; + } +} + +static __rte_always_inline int +copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf *m, struct buf_vector *buf_vec, + uint16_t nr_vec, uint16_t num_buffers) +{ + uint32_t vec_idx = 0; uint32_t mbuf_offset, mbuf_avail; - uint32_t desc_offset, desc_avail; + uint32_t buf_offset, buf_avail; + uint64_t buf_addr, buf_iova, buf_len; uint32_t cpy_len; - uint64_t hdr_addr, hdr_phys_addr; + uint64_t hdr_addr; struct rte_mbuf *hdr_mbuf; struct batch_copy_elem *batch_copy = vq->batch_copy_elems; - uint16_t copy_nb = vq->batch_copy_nb_elems; + struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL; int error = 0; if (unlikely(m == NULL)) { @@ -515,40 +829,54 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, goto out; } - desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr); - if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) { + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; + + if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) { error = -1; goto out; } hdr_mbuf = m; - hdr_addr = desc_addr; - hdr_phys_addr = buf_vec[vec_idx].buf_addr; - rte_prefetch0((void *)(uintptr_t)hdr_addr); + hdr_addr = buf_addr; + if (unlikely(buf_len < dev->vhost_hlen)) + hdr = &tmp_hdr; + else + hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr; - LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n", dev->vid, num_buffers); - desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen; - desc_offset = dev->vhost_hlen; + if (unlikely(buf_len < dev->vhost_hlen)) { + buf_offset = dev->vhost_hlen - buf_len; + vec_idx++; + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; + buf_avail = buf_len - buf_offset; + } else { + buf_offset = dev->vhost_hlen; + buf_avail = buf_len - dev->vhost_hlen; + } mbuf_avail = rte_pktmbuf_data_len(m); 
mbuf_offset = 0; while (mbuf_avail != 0 || m->next != NULL) { - /* done with current desc buf, get the next one */ - if (desc_avail == 0) { + /* done with current buf, get the next one */ + if (buf_avail == 0) { vec_idx++; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, - buf_vec[vec_idx].buf_addr); - if (unlikely(!desc_addr)) { + if (unlikely(vec_idx >= nr_vec)) { error = -1; goto out; } - /* Prefetch buffer address. */ - rte_prefetch0((void *)(uintptr_t)desc_addr); - desc_offset = 0; - desc_avail = buf_vec[vec_idx].buf_len; + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; + + buf_offset = 0; + buf_avail = buf_len; } /* done with current mbuf, get the next one */ @@ -560,105 +888,161 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, } if (hdr_addr) { - struct virtio_net_hdr_mrg_rxbuf *hdr; - - hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t) - hdr_addr; virtio_enqueue_offload(hdr_mbuf, &hdr->hdr); - ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers); + if (rxvq_is_mergeable(dev)) + ASSIGN_UNLESS_EQUAL(hdr->num_buffers, + num_buffers); - vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen); - PRINT_PACKET(dev, (uintptr_t)hdr_addr, - dev->vhost_hlen, 0); + if (unlikely(hdr == &tmp_hdr)) { + copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr); + } else { + PRINT_PACKET(dev, (uintptr_t)hdr_addr, + dev->vhost_hlen, 0); + vhost_log_cache_write_iova(dev, vq, + buf_vec[0].buf_iova, + dev->vhost_hlen); + } hdr_addr = 0; } - cpy_len = RTE_MIN(desc_avail, mbuf_avail); + cpy_len = RTE_MIN(buf_avail, mbuf_avail); - if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) { - rte_memcpy((void *)((uintptr_t)(desc_addr + - desc_offset)), + if (likely(cpy_len > MAX_BATCH_LEN || + vq->batch_copy_nb_elems >= vq->size)) { + rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)), rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), cpy_len); - vhost_log_write(dev, - buf_vec[vec_idx].buf_addr + desc_offset, - cpy_len); - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), + vhost_log_cache_write_iova(dev, vq, + buf_iova + buf_offset, + cpy_len); + PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset), cpy_len, 0); } else { - batch_copy[copy_nb].dst = - (void *)((uintptr_t)(desc_addr + desc_offset)); - batch_copy[copy_nb].src = + batch_copy[vq->batch_copy_nb_elems].dst = + (void *)((uintptr_t)(buf_addr + buf_offset)); + batch_copy[vq->batch_copy_nb_elems].src = rte_pktmbuf_mtod_offset(m, void *, mbuf_offset); - batch_copy[copy_nb].log_addr = - buf_vec[vec_idx].buf_addr + desc_offset; - batch_copy[copy_nb].len = cpy_len; - copy_nb++; + batch_copy[vq->batch_copy_nb_elems].log_addr = + buf_iova + buf_offset; + batch_copy[vq->batch_copy_nb_elems].len = cpy_len; + vq->batch_copy_nb_elems++; } mbuf_avail -= cpy_len; mbuf_offset += cpy_len; - desc_avail -= cpy_len; - desc_offset += cpy_len; + buf_avail -= cpy_len; + buf_offset += cpy_len; } out: - vq->batch_copy_nb_elems = copy_nb; return error; } -static __rte_always_inline uint32_t -virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, +static __rte_always_inline int +vhost_enqueue_single_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mbuf *pkt, + struct buf_vector *buf_vec, + uint16_t *nr_descs) +{ + uint16_t nr_vec = 0; + uint16_t avail_idx = vq->last_avail_idx; + uint16_t max_tries, tries = 0; + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count; + uint32_t size = pkt->pkt_len + dev->vhost_hlen; + uint16_t num_buffers 
= 0; + uint32_t buffer_len[vq->size]; + uint16_t buffer_buf_id[vq->size]; + uint16_t buffer_desc_count[vq->size]; + + if (rxvq_is_mergeable(dev)) + max_tries = vq->size - 1; + else + max_tries = 1; + + while (size > 0) { + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal + * happened. + */ + if (unlikely(++tries > max_tries)) + return -1; + + if (unlikely(fill_vec_buf_packed(dev, vq, + avail_idx, &desc_count, + buf_vec, &nr_vec, + &buf_id, &len, + VHOST_ACCESS_RW) < 0)) + return -1; + + len = RTE_MIN(len, size); + size -= len; + + buffer_len[num_buffers] = len; + buffer_buf_id[num_buffers] = buf_id; + buffer_desc_count[num_buffers] = desc_count; + num_buffers += 1; + + *nr_descs += desc_count; + avail_idx += desc_count; + if (avail_idx >= vq->size) + avail_idx -= vq->size; + } + + if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0) + return -1; + + vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id, + buffer_desc_count, num_buffers); + + return 0; +} + +static __rte_noinline uint32_t +virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts, uint32_t count) { - struct vhost_virtqueue *vq; uint32_t pkt_idx = 0; uint16_t num_buffers; struct buf_vector buf_vec[BUF_VECTOR_MAX]; uint16_t avail_head; - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { - RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, queue_id); - return 0; - } - - vq = dev->virtqueue[queue_id]; - if (unlikely(vq->enabled == 0)) - return 0; - - count = RTE_MIN((uint32_t)MAX_PKT_BURST, count); - if (count == 0) - return 0; + avail_head = *((volatile uint16_t *)&vq->avail->idx); - vq->batch_copy_nb_elems = 0; + /* + * The ordering between avail index and + * desc reads needs to be enforced. 
+ */ + rte_smp_rmb(); rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); - vq->shadow_used_idx = 0; - avail_head = *((volatile uint16_t *)&vq->avail->idx); for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; + uint16_t nr_vec = 0; - if (unlikely(reserve_avail_buf_mergeable(dev, vq, + if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec, &num_buffers, - avail_head) < 0)) { - LOG_DEBUG(VHOST_DATA, + avail_head, &nr_vec) < 0)) { + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) failed to get enough desc from vring\n", dev->vid); vq->shadow_used_idx -= num_buffers; break; } - LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers); - if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx], - buf_vec, num_buffers) < 0) { + if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx], + buf_vec, nr_vec, + num_buffers) < 0) { vq->shadow_used_idx -= num_buffers; break; } @@ -669,20 +1053,200 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, do_data_copy_enqueue(dev, vq); if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring(dev, vq); + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + + return pkt_idx; +} + +static __rte_always_inline int +virtio_dev_rx_batch_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mbuf **pkts) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint16_t avail_idx = vq->last_avail_idx; + uint64_t desc_addrs[PACKED_BATCH_SIZE]; + struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE]; + uint32_t buf_offset = dev->vhost_hlen; + uint64_t lens[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; + + if (unlikely(avail_idx & PACKED_BATCH_MASK)) + return -1; - /* flush used->idx update before we read avail->flags. 
*/ - rte_mb(); + if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size)) + return -1; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (unlikely(pkts[i]->next != NULL)) + return -1; + if (unlikely(!desc_is_avail(&descs[avail_idx + i], + wrap_counter))) + return -1; + } + + rte_smp_rmb(); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + lens[i] = descs[avail_idx + i].len; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset))) + return -1; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + desc_addrs[i] = vhost_iova_to_vva(dev, vq, + descs[avail_idx + i].addr, + &lens[i], + VHOST_ACCESS_RW); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (unlikely(lens[i] != descs[avail_idx + i].len)) + return -1; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + rte_prefetch0((void *)(uintptr_t)desc_addrs[i]); + hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *) + (uintptr_t)desc_addrs[i]; + lens[i] = pkts[i]->pkt_len + dev->vhost_hlen; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr); + + vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset), + rte_pktmbuf_mtod_offset(pkts[i], void *, 0), + pkts[i]->pkt_len); + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + ids[i] = descs[avail_idx + i].id; + + vhost_flush_enqueue_batch_packed(dev, vq, lens, ids); + + return 0; +} + +static __rte_always_inline int16_t +virtio_dev_rx_single_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mbuf *pkt) +{ + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t nr_descs = 0; + + rte_smp_rmb(); + if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec, + &nr_descs) < 0)) { + VHOST_LOG_DEBUG(VHOST_DATA, + "(%d) failed to get enough desc from vring\n", + dev->vid); + return -1; + } + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + dev->vid, vq->last_avail_idx, + vq->last_avail_idx + nr_descs); + + vq_inc_last_avail_packed(vq, nr_descs); + + return 0; +} + +static __rte_noinline uint32_t +virtio_dev_rx_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mbuf **pkts, + uint32_t count) +{ + uint32_t pkt_idx = 0; + uint32_t remained = count; + + do { + rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]); + + if (remained >= PACKED_BATCH_SIZE) { + if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) { + pkt_idx += PACKED_BATCH_SIZE; + remained -= PACKED_BATCH_SIZE; + continue; + } + } - /* Kick the guest if necessary. 
*/ - if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) - && (vq->callfd >= 0)) - eventfd_write(vq->callfd, (eventfd_t)1); + if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx])) + break; + pkt_idx++; + remained--; + + } while (pkt_idx < count); + + if (vq->shadow_used_idx) { + do_data_copy_enqueue(dev, vq); + vhost_flush_enqueue_shadow_packed(dev, vq); } + if (pkt_idx) + vhost_vring_call_packed(dev, vq); + return pkt_idx; } +static __rte_always_inline uint32_t +virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, + struct rte_mbuf **pkts, uint32_t count) +{ + struct vhost_virtqueue *vq; + uint32_t nb_tx = 0; + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { + RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", + dev->vid, __func__, queue_id); + return 0; + } + + vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); + + if (unlikely(vq->enabled == 0)) + goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) + goto out; + + count = RTE_MIN((uint32_t)MAX_PKT_BURST, count); + if (count == 0) + goto out; + + if (vq_is_packed(dev)) + nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count); + else + nb_tx = virtio_dev_rx_split(dev, vq, pkts, count); + +out: + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + +out_access_unlock: + rte_spinlock_unlock(&vq->access_lock); + + return nb_tx; +} + uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id, struct rte_mbuf **pkts, uint16_t count) @@ -692,10 +1256,14 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id, if (!dev) return 0; - if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) - return virtio_dev_merge_rx(dev, queue_id, pkts, count); - else - return virtio_dev_rx(dev, queue_id, pkts, count); + if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) { + RTE_LOG(ERR, VHOST_DATA, + "(%d) %s: built-in vhost net backend is disabled.\n", + dev->vid, __func__); + return 0; + } + + return virtio_dev_rx(dev, queue_id, pkts, count); } static inline bool @@ -715,38 +1283,39 @@ virtio_net_with_host_offload(struct virtio_net *dev) static void parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr) { - struct ipv4_hdr *ipv4_hdr; - struct ipv6_hdr *ipv6_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; void *l3_hdr = NULL; - struct ether_hdr *eth_hdr; + struct rte_ether_hdr *eth_hdr; uint16_t ethertype; - eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); - m->l2_len = sizeof(struct ether_hdr); + m->l2_len = sizeof(struct rte_ether_hdr); ethertype = rte_be_to_cpu_16(eth_hdr->ether_type); - if (ethertype == ETHER_TYPE_VLAN) { - struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1); + if (ethertype == RTE_ETHER_TYPE_VLAN) { + struct rte_vlan_hdr *vlan_hdr = + (struct rte_vlan_hdr *)(eth_hdr + 1); - m->l2_len += sizeof(struct vlan_hdr); + m->l2_len += sizeof(struct rte_vlan_hdr); ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto); } l3_hdr = (char *)eth_hdr + m->l2_len; switch (ethertype) { - case ETHER_TYPE_IPv4: + case RTE_ETHER_TYPE_IPV4: ipv4_hdr = l3_hdr; *l4_proto = ipv4_hdr->next_proto_id; m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4; *l4_hdr = (char *)l3_hdr + m->l3_len; m->ol_flags |= PKT_TX_IPV4; break; - case ETHER_TYPE_IPv6: + case 
RTE_ETHER_TYPE_IPV6: ipv6_hdr = l3_hdr; *l4_proto = ipv6_hdr->proto; - m->l3_len = sizeof(struct ipv6_hdr); + m->l3_len = sizeof(struct rte_ipv6_hdr); *l4_hdr = (char *)l3_hdr + m->l3_len; m->ol_flags |= PKT_TX_IPV6; break; @@ -763,7 +1332,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) { uint16_t l4_proto = 0; void *l4_hdr = NULL; - struct tcp_hdr *tcp_hdr = NULL; + struct rte_tcp_hdr *tcp_hdr = NULL; if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE) return; @@ -772,15 +1341,15 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) { if (hdr->csum_start == (m->l2_len + m->l3_len)) { switch (hdr->csum_offset) { - case (offsetof(struct tcp_hdr, cksum)): + case (offsetof(struct rte_tcp_hdr, cksum)): if (l4_proto == IPPROTO_TCP) m->ol_flags |= PKT_TX_TCP_CKSUM; break; - case (offsetof(struct udp_hdr, dgram_cksum)): + case (offsetof(struct rte_udp_hdr, dgram_cksum)): if (l4_proto == IPPROTO_UDP) m->ol_flags |= PKT_TX_UDP_CKSUM; break; - case (offsetof(struct sctp_hdr, cksum)): + case (offsetof(struct rte_sctp_hdr, cksum)): if (l4_proto == IPPROTO_SCTP) m->ol_flags |= PKT_TX_SCTP_CKSUM; break; @@ -799,6 +1368,11 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) m->tso_segsz = hdr->gso_size; m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; break; + case VIRTIO_NET_HDR_GSO_UDP: + m->ol_flags |= PKT_TX_UDP_SEG; + m->tso_segsz = hdr->gso_size; + m->l4_len = sizeof(struct rte_udp_hdr); + break; default: RTE_LOG(WARNING, VHOST_DATA, "unsupported gso type %u.\n", hdr->gso_type); @@ -807,86 +1381,64 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) } } -#define RARP_PKT_SIZE 64 - -static int -make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac) +static __rte_noinline void +copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr, + struct buf_vector *buf_vec) { - struct ether_hdr *eth_hdr; - struct arp_hdr *rarp; - - if (rarp_mbuf->buf_len < 64) { - RTE_LOG(WARNING, VHOST_DATA, - "failed to make RARP; mbuf size too small %u (< %d)\n", - rarp_mbuf->buf_len, RARP_PKT_SIZE); - return -1; + uint64_t len; + uint64_t remain = sizeof(struct virtio_net_hdr); + uint64_t src; + uint64_t dst = (uint64_t)(uintptr_t)hdr; + + while (remain) { + len = RTE_MIN(remain, buf_vec->buf_len); + src = buf_vec->buf_addr; + rte_memcpy((void *)(uintptr_t)dst, + (void *)(uintptr_t)src, len); + + remain -= len; + dst += len; + buf_vec++; } - - /* Ethernet header. */ - eth_hdr = rte_pktmbuf_mtod_offset(rarp_mbuf, struct ether_hdr *, 0); - memset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN); - ether_addr_copy(mac, ð_hdr->s_addr); - eth_hdr->ether_type = htons(ETHER_TYPE_RARP); - - /* RARP header. 
*/ - rarp = (struct arp_hdr *)(eth_hdr + 1); - rarp->arp_hrd = htons(ARP_HRD_ETHER); - rarp->arp_pro = htons(ETHER_TYPE_IPv4); - rarp->arp_hln = ETHER_ADDR_LEN; - rarp->arp_pln = 4; - rarp->arp_op = htons(ARP_OP_REVREQUEST); - - ether_addr_copy(mac, &rarp->arp_data.arp_sha); - ether_addr_copy(mac, &rarp->arp_data.arp_tha); - memset(&rarp->arp_data.arp_sip, 0x00, 4); - memset(&rarp->arp_data.arp_tip, 0x00, 4); - - rarp_mbuf->pkt_len = rarp_mbuf->data_len = RARP_PKT_SIZE; - - return 0; -} - -static __rte_always_inline void -put_zmbuf(struct zcopy_mbuf *zmbuf) -{ - zmbuf->in_use = 0; } static __rte_always_inline int copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, - struct vring_desc *descs, uint16_t max_desc, - struct rte_mbuf *m, uint16_t desc_idx, - struct rte_mempool *mbuf_pool) + struct buf_vector *buf_vec, uint16_t nr_vec, + struct rte_mbuf *m, struct rte_mempool *mbuf_pool) { - struct vring_desc *desc; - uint64_t desc_addr; - uint32_t desc_avail, desc_offset; + uint32_t buf_avail, buf_offset; + uint64_t buf_addr, buf_iova, buf_len; uint32_t mbuf_avail, mbuf_offset; uint32_t cpy_len; struct rte_mbuf *cur = m, *prev = m; + struct virtio_net_hdr tmp_hdr; struct virtio_net_hdr *hdr = NULL; /* A counter to avoid desc dead loop chain */ - uint32_t nr_desc = 1; + uint16_t vec_idx = 0; struct batch_copy_elem *batch_copy = vq->batch_copy_elems; - uint16_t copy_nb = vq->batch_copy_nb_elems; int error = 0; - desc = &descs[desc_idx]; - if (unlikely((desc->len < dev->vhost_hlen)) || - (desc->flags & VRING_DESC_F_INDIRECT)) { - error = -1; - goto out; - } + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) { + if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) { error = -1; goto out; } if (virtio_net_with_host_offload(dev)) { - hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr); - rte_prefetch0(hdr); + if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) { + /* + * No luck, the virtio-net header doesn't fit + * in a contiguous virtual area. + */ + copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec); + hdr = &tmp_hdr; + } else { + hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr); + } } /* @@ -894,38 +1446,37 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, * for Tx: the first for storing the header, and others * for storing the data. 
*/ - if (likely((desc->len == dev->vhost_hlen) && - (desc->flags & VRING_DESC_F_NEXT) != 0)) { - desc = &descs[desc->next]; - if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) { - error = -1; - goto out; - } - - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) { - error = -1; + if (unlikely(buf_len < dev->vhost_hlen)) { + buf_offset = dev->vhost_hlen - buf_len; + vec_idx++; + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; + buf_avail = buf_len - buf_offset; + } else if (buf_len == dev->vhost_hlen) { + if (unlikely(++vec_idx >= nr_vec)) goto out; - } + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; - desc_offset = 0; - desc_avail = desc->len; - nr_desc += 1; + buf_offset = 0; + buf_avail = buf_len; } else { - desc_avail = desc->len - dev->vhost_hlen; - desc_offset = dev->vhost_hlen; + buf_offset = dev->vhost_hlen; + buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen; } - rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset)); - - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0); + PRINT_PACKET(dev, + (uintptr_t)(buf_addr + buf_offset), + (uint32_t)buf_avail, 0); mbuf_offset = 0; mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; while (1) { uint64_t hpa; - cpy_len = RTE_MIN(desc_avail, mbuf_avail); + cpy_len = RTE_MIN(buf_avail, mbuf_avail); /* * A desc buf might across two host physical pages that are @@ -933,11 +1484,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, * will be copied even though zero copy is enabled. */ if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev, - desc->addr + desc_offset, cpy_len)))) { + buf_iova + buf_offset, cpy_len)))) { cur->data_len = cpy_len; cur->data_off = 0; - cur->buf_addr = (void *)(uintptr_t)desc_addr; - cur->buf_physaddr = hpa; + cur->buf_addr = + (void *)(uintptr_t)(buf_addr + buf_offset); + cur->buf_iova = hpa; /* * In zero copy mode, one mbuf can only reference data @@ -946,57 +1498,45 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, mbuf_avail = cpy_len; } else { if (likely(cpy_len > MAX_BATCH_LEN || - copy_nb >= vq->size)) { + vq->batch_copy_nb_elems >= vq->size || + (hdr && cur == m))) { rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset), - (void *)((uintptr_t)(desc_addr + - desc_offset)), + (void *)((uintptr_t)(buf_addr + + buf_offset)), cpy_len); } else { - batch_copy[copy_nb].dst = + batch_copy[vq->batch_copy_nb_elems].dst = rte_pktmbuf_mtod_offset(cur, void *, mbuf_offset); - batch_copy[copy_nb].src = - (void *)((uintptr_t)(desc_addr + - desc_offset)); - batch_copy[copy_nb].len = cpy_len; - copy_nb++; + batch_copy[vq->batch_copy_nb_elems].src = + (void *)((uintptr_t)(buf_addr + + buf_offset)); + batch_copy[vq->batch_copy_nb_elems].len = + cpy_len; + vq->batch_copy_nb_elems++; } } mbuf_avail -= cpy_len; mbuf_offset += cpy_len; - desc_avail -= cpy_len; - desc_offset += cpy_len; + buf_avail -= cpy_len; + buf_offset += cpy_len; - /* This desc reaches to its end, get the next one */ - if (desc_avail == 0) { - if ((desc->flags & VRING_DESC_F_NEXT) == 0) + /* This buf reaches to its end, get the next one */ + if (buf_avail == 0) { + if (++vec_idx >= nr_vec) break; - if (unlikely(desc->next >= max_desc || - ++nr_desc > max_desc)) { - error = -1; - goto out; - } - desc = &descs[desc->next]; - if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) { - error = -1; - goto out; - } - - 
desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) { - error = -1; - goto out; - } - - rte_prefetch0((void *)(uintptr_t)desc_addr); + buf_addr = buf_vec[vec_idx].buf_addr; + buf_iova = buf_vec[vec_idx].buf_iova; + buf_len = buf_vec[vec_idx].buf_len; - desc_offset = 0; - desc_avail = desc->len; + buf_offset = 0; + buf_avail = buf_len; - PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0); + PRINT_PACKET(dev, (uintptr_t)buf_addr, + (uint32_t)buf_avail, 0); } /* @@ -1032,42 +1572,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, vhost_dequeue_offload(hdr, m); out: - vq->batch_copy_nb_elems = copy_nb; return error; } -static __rte_always_inline void -update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint32_t used_idx, uint32_t desc_idx) -{ - vq->used->ring[used_idx].id = desc_idx; - vq->used->ring[used_idx].len = 0; - vhost_log_used_vring(dev, vq, - offsetof(struct vring_used, ring[used_idx]), - sizeof(vq->used->ring[used_idx])); -} - -static __rte_always_inline void -update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint32_t count) -{ - if (unlikely(count == 0)) - return; - - rte_smp_wmb(); - rte_smp_rmb(); - - vq->used->idx += count; - vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), - sizeof(vq->used->idx)); - - /* Kick guest if required. */ - if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) - && (vq->callfd >= 0)) - eventfd_write(vq->callfd, (eventfd_t)1); -} - static __rte_always_inline struct zcopy_mbuf * get_zmbuf(struct vhost_virtqueue *vq) { @@ -1099,169 +1607,563 @@ again: return NULL; } -static __rte_always_inline bool -mbuf_is_consumed(struct rte_mbuf *m) +static void +virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque) { - while (m) { - if (rte_mbuf_refcnt_read(m) > 1) - return false; - m = m->next; + rte_free(opaque); +} + +static int +virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size) +{ + struct rte_mbuf_ext_shared_info *shinfo = NULL; + uint32_t total_len = RTE_PKTMBUF_HEADROOM + size; + uint16_t buf_len; + rte_iova_t iova; + void *buf; + + /* Try to use pkt buffer to store shinfo to reduce the amount of memory + * required, otherwise store shinfo in the new buffer. + */ + if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo)) + shinfo = rte_pktmbuf_mtod(pkt, + struct rte_mbuf_ext_shared_info *); + else { + total_len += sizeof(*shinfo) + sizeof(uintptr_t); + total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t)); + } + + if (unlikely(total_len > UINT16_MAX)) + return -ENOSPC; + + buf_len = total_len; + buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE); + if (unlikely(buf == NULL)) + return -ENOMEM; + + /* Initialize shinfo */ + if (shinfo) { + shinfo->free_cb = virtio_dev_extbuf_free; + shinfo->fcb_opaque = buf; + rte_mbuf_ext_refcnt_set(shinfo, 1); + } else { + shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len, + virtio_dev_extbuf_free, buf); + if (unlikely(shinfo == NULL)) { + rte_free(buf); + RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n"); + return -1; + } } - return true; + iova = rte_malloc_virt2iova(buf); + rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo); + rte_pktmbuf_reset_headroom(pkt); + + return 0; } -uint16_t -rte_vhost_dequeue_burst(int vid, uint16_t queue_id, - struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +/* + * Allocate a host supported pktmbuf. 
+ */ +static __rte_always_inline struct rte_mbuf * +virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp, + uint32_t data_len) { - struct virtio_net *dev; - struct rte_mbuf *rarp_mbuf = NULL; - struct vhost_virtqueue *vq; - uint32_t desc_indexes[MAX_PKT_BURST]; - uint32_t used_idx; - uint32_t i = 0; - uint16_t free_entries; - uint16_t avail_idx; + struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp); - dev = get_device(vid); - if (!dev) - return 0; + if (unlikely(pkt == NULL)) + return NULL; - if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) { - RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, queue_id); - return 0; - } + if (rte_pktmbuf_tailroom(pkt) >= data_len) + return pkt; - vq = dev->virtqueue[queue_id]; - if (unlikely(vq->enabled == 0)) - return 0; + /* attach an external buffer if supported */ + if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len)) + return pkt; - vq->batch_copy_nb_elems = 0; + /* check if chained buffers are allowed */ + if (!dev->linearbuf) + return pkt; + + /* Data doesn't fit into the buffer and the host supports + * only linear buffers + */ + rte_pktmbuf_free(pkt); + + return NULL; +} + +static __rte_noinline uint16_t +virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + uint16_t i; + uint16_t free_entries; if (unlikely(dev->dequeue_zero_copy)) { struct zcopy_mbuf *zmbuf, *next; - int nr_updated = 0; for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); zmbuf != NULL; zmbuf = next) { next = TAILQ_NEXT(zmbuf, next); if (mbuf_is_consumed(zmbuf->mbuf)) { - used_idx = vq->last_used_idx++ & (vq->size - 1); - update_used_ring(dev, vq, used_idx, - zmbuf->desc_idx); - nr_updated += 1; - + update_shadow_used_ring_split(vq, + zmbuf->desc_idx, 0); TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); rte_pktmbuf_free(zmbuf->mbuf); put_zmbuf(zmbuf); vq->nr_zmbuf -= 1; } } - update_used_idx(dev, vq, nr_updated); - } - - /* - * Construct a RARP broadcast packet, and inject it to the "pkts" - * array, to looks like that guest actually send such packet. - * - * Check user_send_rarp() for more information. - * - * broadcast_rarp shares a cacheline in the virtio_net structure - * with some fields that are accessed during enqueue and - * rte_atomic16_cmpset() causes a write if using cmpxchg. This could - * result in false sharing between enqueue and dequeue. - * - * Prevent unnecessary false sharing by reading broadcast_rarp first - * and only performing cmpset if the read indicates it is likely to - * be set. - */ - - if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) && - rte_atomic16_cmpset((volatile uint16_t *) - &dev->broadcast_rarp.cnt, 1, 0))) { - - rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool); - if (rarp_mbuf == NULL) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to allocate memory for mbuf.\n"); - return 0; - } - - if (make_rarp_packet(rarp_mbuf, &dev->mac)) { - rte_pktmbuf_free(rarp_mbuf); - rarp_mbuf = NULL; - } else { - count -= 1; + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); } } free_entries = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx; if (free_entries == 0) - goto out; + return 0; + + /* + * The ordering between avail index and + * desc reads needs to be enforced. 
+ */ + rte_smp_rmb(); - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); - /* Prefetch available and used ring */ - avail_idx = vq->last_avail_idx & (vq->size - 1); - used_idx = vq->last_used_idx & (vq->size - 1); - rte_prefetch0(&vq->avail->ring[avail_idx]); - rte_prefetch0(&vq->used->ring[used_idx]); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); count = RTE_MIN(count, MAX_PKT_BURST); count = RTE_MIN(count, free_entries); - LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", dev->vid, count); - /* Retrieve all of the head indexes first to avoid caching issues. */ for (i = 0; i < count; i++) { - avail_idx = (vq->last_avail_idx + i) & (vq->size - 1); - used_idx = (vq->last_used_idx + i) & (vq->size - 1); - desc_indexes[i] = vq->avail->ring[avail_idx]; + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t head_idx; + uint32_t buf_len; + uint16_t nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_split(dev, vq, + vq->last_avail_idx + i, + &nr_vec, buf_vec, + &head_idx, &buf_len, + VHOST_ACCESS_RO) < 0)) + break; if (likely(dev->dequeue_zero_copy == 0)) - update_used_ring(dev, vq, used_idx, desc_indexes[i]); - } + update_shadow_used_ring_split(vq, head_idx, 0); - /* Prefetch descriptor index. */ - rte_prefetch0(&vq->desc[desc_indexes[0]]); - for (i = 0; i < count; i++) { - struct vring_desc *desc; - uint16_t sz, idx; - int err; + pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len); + if (unlikely(pkts[i] == NULL)) + break; - if (likely(i + 1 < count)) - rte_prefetch0(&vq->desc[desc_indexes[i + 1]]); + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], + mbuf_pool); + if (unlikely(err)) { + rte_pktmbuf_free(pkts[i]); + break; + } - if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) { - desc = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, - vq->desc[desc_indexes[i]].addr); - if (unlikely(!desc)) + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf; + + zmbuf = get_zmbuf(vq); + if (!zmbuf) { + rte_pktmbuf_free(pkts[i]); break; + } + zmbuf->mbuf = pkts[i]; + zmbuf->desc_idx = head_idx; - rte_prefetch0(desc); - sz = vq->desc[desc_indexes[i]].len / sizeof(*desc); - idx = 0; - } else { - desc = vq->desc; - sz = vq->size; - idx = desc_indexes[i]; + /* + * Pin lock the mbuf; we will check later to see + * whether the mbuf is freed (when we are the last + * user) or not. If that's the case, we then could + * update the used ring safely. 
+ */ + rte_mbuf_refcnt_update(pkts[i], 1); + + vq->nr_zmbuf += 1; + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); } + } + vq->last_avail_idx += i; - pkts[i] = rte_pktmbuf_alloc(mbuf_pool); - if (unlikely(pkts[i] == NULL)) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to allocate memory for mbuf.\n"); - break; + if (likely(dev->dequeue_zero_copy == 0)) { + do_data_copy_dequeue(vq); + if (unlikely(i < count)) + vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + } + + return i; +} + +static __rte_always_inline int +vhost_reserve_avail_batch_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts, + uint16_t avail_idx, + uintptr_t *desc_addrs, + uint16_t *ids) +{ + bool wrap = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + struct virtio_net_hdr *hdr; + uint64_t lens[PACKED_BATCH_SIZE]; + uint64_t buf_lens[PACKED_BATCH_SIZE]; + uint32_t buf_offset = dev->vhost_hlen; + uint16_t flags, i; + + if (unlikely(avail_idx & PACKED_BATCH_MASK)) + return -1; + if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size)) + return -1; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + flags = descs[avail_idx + i].flags; + if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) || + (wrap == !!(flags & VRING_DESC_F_USED)) || + (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG))) + return -1; + } + + rte_smp_rmb(); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + lens[i] = descs[avail_idx + i].len; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + desc_addrs[i] = vhost_iova_to_vva(dev, vq, + descs[avail_idx + i].addr, + &lens[i], VHOST_ACCESS_RW); + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (unlikely((lens[i] != descs[avail_idx + i].len))) + return -1; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]); + if (!pkts[i]) + goto free_buf; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (unlikely(buf_lens[i] < (lens[i] - buf_offset))) + goto free_buf; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset; + pkts[i]->data_len = pkts[i]->pkt_len; + ids[i] = descs[avail_idx + i].id; + } + + if (virtio_net_with_host_offload(dev)) { + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + hdr = (struct virtio_net_hdr *)(desc_addrs[i]); + vhost_dequeue_offload(hdr, pkts[i]); } + } + + return 0; + +free_buf: + for (i = 0; i < PACKED_BATCH_SIZE; i++) + rte_pktmbuf_free(pkts[i]); + + return -1; +} + +static __rte_unused int +virtio_dev_tx_batch_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + uint16_t avail_idx = vq->last_avail_idx; + uint32_t buf_offset = dev->vhost_hlen; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; + + if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts, + avail_idx, desc_addrs, ids)) + return -1; - err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx, - mbuf_pool); + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + rte_prefetch0((void *)(uintptr_t)desc_addrs[i]); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0), + (void 
*)(uintptr_t)(desc_addrs[i] + buf_offset), + pkts[i]->pkt_len); + + vhost_shadow_dequeue_batch_packed(dev, vq, ids); + + vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE); + + return 0; +} + +static __rte_always_inline int +vhost_dequeue_single_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts, + uint16_t *buf_id, + uint16_t *desc_count) +{ + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint32_t buf_len; + uint16_t nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_packed(dev, vq, + vq->last_avail_idx, desc_count, + buf_vec, &nr_vec, + buf_id, &buf_len, + VHOST_ACCESS_RO) < 0)) + return -1; + + *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len); + if (unlikely(*pkts == NULL)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to allocate memory for mbuf.\n"); + return -1; + } + + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts, + mbuf_pool); + if (unlikely(err)) { + rte_pktmbuf_free(*pkts); + return -1; + } + + return 0; +} + +static __rte_unused int +virtio_dev_tx_single_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + + uint16_t buf_id, desc_count; + + if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id, + &desc_count)) + return -1; + + vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count); + + vq_inc_last_avail_packed(vq, desc_count); + + return 0; +} + +static __rte_unused int +virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE]; + uintptr_t desc_addrs[PACKED_BATCH_SIZE]; + uint16_t ids[PACKED_BATCH_SIZE]; + uint16_t i; + + uint16_t avail_idx = vq->last_avail_idx; + + if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts, + avail_idx, desc_addrs, ids)) + return -1; + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + zmbufs[i] = get_zmbuf(vq); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + if (!zmbufs[i]) + goto free_pkt; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { + zmbufs[i]->mbuf = pkts[i]; + zmbufs[i]->desc_idx = avail_idx + i; + zmbufs[i]->desc_count = 1; + } + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + rte_mbuf_refcnt_update(pkts[i], 1); + + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next); + + vq->nr_zmbuf += PACKED_BATCH_SIZE; + vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE); + + return 0; + +free_pkt: + vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) + rte_pktmbuf_free(pkts[i]); + + return -1; +} + +static __rte_unused int +virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, + struct rte_mbuf **pkts) +{ + uint16_t buf_id, desc_count; + struct zcopy_mbuf *zmbuf; + + if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id, + &desc_count)) + return -1; + + zmbuf = get_zmbuf(vq); + if (!zmbuf) { + rte_pktmbuf_free(*pkts); + return -1; + } + zmbuf->mbuf = *pkts; + zmbuf->desc_idx = vq->last_avail_idx; + zmbuf->desc_count = desc_count; + + rte_mbuf_refcnt_update(*pkts, 1); + + vq->nr_zmbuf += 1; + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); + + vq_inc_last_avail_packed(vq, desc_count); + return 0; +} + +static __rte_unused void +free_zmbuf(struct vhost_virtqueue *vq) +{ + struct zcopy_mbuf *next = NULL; + struct zcopy_mbuf *zmbuf; + + for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); + zmbuf 
!= NULL; zmbuf = next) { + next = TAILQ_NEXT(zmbuf, next); + + uint16_t last_used_idx = vq->last_used_idx; + + if (mbuf_is_consumed(zmbuf->mbuf)) { + uint16_t flags; + flags = vq->desc_packed[last_used_idx].flags; + if (vq->used_wrap_counter) { + flags |= VRING_DESC_F_USED; + flags |= VRING_DESC_F_AVAIL; + } else { + flags &= ~VRING_DESC_F_USED; + flags &= ~VRING_DESC_F_AVAIL; + } + + vq->desc_packed[last_used_idx].id = zmbuf->desc_idx; + vq->desc_packed[last_used_idx].len = 0; + + rte_smp_wmb(); + vq->desc_packed[last_used_idx].flags = flags; + + vq_inc_last_used_packed(vq, zmbuf->desc_count); + + TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); + rte_pktmbuf_free(zmbuf->mbuf); + put_zmbuf(zmbuf); + vq->nr_zmbuf -= 1; + } + } +} + +static __rte_noinline uint16_t +virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + uint16_t i; + + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf, *next; + + for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); + zmbuf != NULL; zmbuf = next) { + next = TAILQ_NEXT(zmbuf, next); + + if (mbuf_is_consumed(zmbuf->mbuf)) { + update_shadow_used_ring_packed(vq, + zmbuf->desc_idx, + 0, + zmbuf->desc_count); + + TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); + rte_pktmbuf_free(zmbuf->mbuf); + put_zmbuf(zmbuf); + vq->nr_zmbuf -= 1; + } + } + + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } + } + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + + count = RTE_MIN(count, MAX_PKT_BURST); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", + dev->vid, count); + + for (i = 0; i < count; i++) { + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t buf_id; + uint32_t buf_len; + uint16_t desc_count, nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_packed(dev, vq, + vq->last_avail_idx, &desc_count, + buf_vec, &nr_vec, + &buf_id, &buf_len, + VHOST_ACCESS_RO) < 0)) + break; + + if (likely(dev->dequeue_zero_copy == 0)) + update_shadow_used_ring_packed(vq, buf_id, 0, + desc_count); + + pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len); + if (unlikely(pkts[i] == NULL)) + break; + + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], + mbuf_pool); if (unlikely(err)) { rte_pktmbuf_free(pkts[i]); break; @@ -1276,7 +2178,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, break; } zmbuf->mbuf = pkts[i]; - zmbuf->desc_idx = desc_indexes[i]; + zmbuf->desc_idx = buf_id; + zmbuf->desc_count = desc_count; /* * Pin lock the mbuf; we will check later to see @@ -1289,25 +2192,117 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, vq->nr_zmbuf += 1; TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); } + + vq_inc_last_avail_packed(vq, desc_count); } - vq->last_avail_idx += i; if (likely(dev->dequeue_zero_copy == 0)) { do_data_copy_dequeue(vq); - vq->last_used_idx += i; - update_used_idx(dev, vq, i); + if (unlikely(i < count)) + vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } } + return i; +} + +uint16_t +rte_vhost_dequeue_burst(int vid, uint16_t queue_id, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + struct virtio_net *dev; + struct rte_mbuf *rarp_mbuf = NULL; + struct vhost_virtqueue *vq; + + dev = get_device(vid); + if (!dev) + return 0; + + if (unlikely(!(dev->flags & 
VIRTIO_DEV_BUILTIN_VIRTIO_NET))) { + RTE_LOG(ERR, VHOST_DATA, + "(%d) %s: built-in vhost net backend is disabled.\n", + dev->vid, __func__); + return 0; + } + + if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) { + RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", + dev->vid, __func__, queue_id); + return 0; + } + + vq = dev->virtqueue[queue_id]; + + if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) + return 0; + + if (unlikely(vq->enabled == 0)) { + count = 0; + goto out_access_unlock; + } + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) { + count = 0; + goto out; + } + + /* + * Construct a RARP broadcast packet, and inject it to the "pkts" + * array, to looks like that guest actually send such packet. + * + * Check user_send_rarp() for more information. + * + * broadcast_rarp shares a cacheline in the virtio_net structure + * with some fields that are accessed during enqueue and + * rte_atomic16_cmpset() causes a write if using cmpxchg. This could + * result in false sharing between enqueue and dequeue. + * + * Prevent unnecessary false sharing by reading broadcast_rarp first + * and only performing cmpset if the read indicates it is likely to + * be set. + */ + if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) && + rte_atomic16_cmpset((volatile uint16_t *) + &dev->broadcast_rarp.cnt, 1, 0))) { + + rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac); + if (rarp_mbuf == NULL) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to make RARP packet.\n"); + count = 0; + goto out; + } + count -= 1; + } + + if (vq_is_packed(dev)) + count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count); + else + count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count); + out: + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + +out_access_unlock: + rte_spinlock_unlock(&vq->access_lock); + if (unlikely(rarp_mbuf != NULL)) { /* * Inject it to the head of "pkts" array, so that switch's mac * learning table will get updated first. */ - memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *)); + memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *)); pkts[0] = rarp_mbuf; - i += 1; + count += 1; } - return i; + return count; }
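
As a rough usage sketch (not part of the patch above), the reworked rte_vhost_dequeue_burst() entry point would typically be driven from an application polling loop along the following lines. The vid, the mbuf pool, the queue index (1, assumed here to be the guest TX ring of the first queue pair) and the burst size are assumptions of this example, not values taken from the patch; only the function signature and the "inject RARP at pkts[0]" behaviour come from the code shown.

	#include <stdint.h>
	#include <rte_mbuf.h>
	#include <rte_mempool.h>
	#include <rte_vhost.h>

	#define EXAMPLE_BURST 32	/* burst size chosen for the example */

	static void
	drain_guest_tx(int vid, struct rte_mempool *mbuf_pool)
	{
		struct rte_mbuf *pkts[EXAMPLE_BURST];
		uint16_t nb_rx, i;

		/*
		 * Queue index 1 is assumed to be the guest TX virtqueue of
		 * the first queue pair; rte_vhost_dequeue_burst() returns 0
		 * when the device or the ring is not ready.
		 */
		nb_rx = rte_vhost_dequeue_burst(vid, 1, mbuf_pool,
						pkts, EXAMPLE_BURST);

		for (i = 0; i < nb_rx; i++) {
			/*
			 * pkts[0] may be an injected RARP broadcast; a real
			 * application would forward these packets, here they
			 * are simply dropped.
			 */
			rte_pktmbuf_free(pkts[i]);
		}
	}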