diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 8f2ade1dfd..a5f0eebaa8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -39,7 +39,7 @@
 #include <rte_memcpy.h>
 #include <rte_ether.h>
 #include <rte_ip.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_sctp.h>
@@ -48,50 +48,14 @@
 #include "vhost.h"
 
 #define MAX_PKT_BURST 32
-#define VHOST_LOG_PAGE	4096
-
-static inline void __attribute__((always_inline))
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
-	log_base[page / 8] |= 1 << (page % 8);
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
-{
-	uint64_t page;
-
-	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-		   !dev->log_base || !len))
-		return;
-
-	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-		return;
-
-	/* To make sure guest memory updates are committed before logging */
-	rte_smp_wmb();
-
-	page = addr / VHOST_LOG_PAGE;
-	while (page * VHOST_LOG_PAGE < addr + len) {
-		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-		page += 1;
-	}
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		     uint64_t offset, uint64_t len)
-{
-	vhost_log_write(dev, vq->log_guest_addr + offset, len);
-}
 
 static bool
-is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
-	return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			  uint16_t to, uint16_t from, uint16_t size)
 {
@@ -103,7 +67,7 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		size * sizeof(struct vring_used_elem));
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
@@ -131,7 +95,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		sizeof(vq->used->idx));
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 update_shadow_used_ring(struct vhost_virtqueue *vq,
 			 uint16_t desc_idx, uint16_t len)
 {
@@ -141,14 +105,25 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
 	vq->shadow_used_ring[i].len = len;
 }
 
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	if ((var) != (val))			\
+		(var) = (val);			\
+} while (0)
+
 static void
 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 {
-	if (m_buf->ol_flags & PKT_TX_L4_MASK) {
+	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+
+	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
+		csum_l4 |= PKT_TX_TCP_CKSUM;
+
+	if (csum_l4) {
 		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
 
-		switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
+		switch (csum_l4) {
 		case PKT_TX_TCP_CKSUM:
 			net_hdr->csum_offset = (offsetof(struct tcp_hdr,
 						cksum));
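
The ASSIGN_UNLESS_EQUAL macro introduced above replaces unconditional stores to the virtio-net header with a compare-first pattern. The header lives in memory shared with the guest, and in the common no-offload case its fields are already zero, so skipping the redundant store avoids dirtying that cacheline on every packet. A minimal sketch of the intent (clear_net_hdr is a hypothetical helper, not part of this patch):

	/* Hypothetical helper: zero the header fields with
	 * load-compare-maybe-store.  When the fields are already zero
	 * (no offloads requested), no store is issued and the shared
	 * cacheline stays clean. */
	static void
	clear_net_hdr(struct virtio_net_hdr *net_hdr)
	{
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
	}
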
@@ -162,6 +137,19 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 					cksum));
 			break;
 		}
+	} else {
+		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
+		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
+		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
+	}
+
+	/* IP cksum verification cannot be bypassed, then calculate here */
+	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+		struct ipv4_hdr *ipv4_hdr;
+
+		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+						   m_buf->l2_len);
+		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
 	}
 
 	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
@@ -172,32 +160,27 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 		net_hdr->gso_size = m_buf->tso_segsz;
 		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
 					+ m_buf->l4_len;
+	} else {
+		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
+		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
+		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
 	}
 }
 
-static inline void
-copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
-		    struct virtio_net_hdr_mrg_rxbuf hdr)
-{
-	if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
-		*(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
-	else
-		*(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
-}
-
-static inline int __attribute__((always_inline))
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		  struct rte_mbuf *m, uint16_t desc_idx)
+static __rte_always_inline int
+copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
+		  struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
 {
 	uint32_t desc_avail, desc_offset;
 	uint32_t mbuf_avail, mbuf_offset;
 	uint32_t cpy_len;
 	struct vring_desc *desc;
 	uint64_t desc_addr;
-	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+	/* A counter to avoid desc dead loop chain */
+	uint16_t nr_desc = 1;
 
-	desc = &vq->desc[desc_idx];
-	desc_addr = gpa_to_vva(dev, desc->addr);
+	desc = &descs[desc_idx];
+	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
 	/*
 	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
 	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -208,8 +191,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	rte_prefetch0((void *)(uintptr_t)desc_addr);
 
-	virtio_enqueue_offload(m, &virtio_hdr.hdr);
-	copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
+	virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
 	vhost_log_write(dev, desc->addr, dev->vhost_hlen);
 	PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
 
@@ -233,11 +215,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				/* Room in vring buffer is not enough */
 				return -1;
 			}
-			if (unlikely(desc->next >= vq->size))
+			if (unlikely(desc->next >= size || ++nr_desc > size))
 				return -1;
 
-			desc = &vq->desc[desc->next];
-			desc_addr = gpa_to_vva(dev, desc->addr);
+			desc = &descs[desc->next];
+			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
 			if (unlikely(!desc_addr))
 				return -1;
 
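
The new size/nr_desc checks in copy_mbuf_to_desc() bound the descriptor-chain walk: the next field is guest-controlled, so a buggy or hostile guest could otherwise publish a circular chain and pin the PMD thread in an endless loop. Because a valid chain references each of the size table entries at most once, visiting more than size descriptors proves a cycle. A minimal sketch of the guard, with illustrative names (walk_desc_chain is not part of this patch):

	/* Illustrative: bounded walk of a guest-provided descriptor
	 * chain.  'size' is the number of entries in the table, so any
	 * cycle is detected after at most 'size' steps. */
	static int
	walk_desc_chain(struct vring_desc *descs, uint16_t idx, uint32_t size)
	{
		uint16_t nr_desc = 1;

		while (descs[idx].flags & VRING_DESC_F_NEXT) {
			idx = descs[idx].next;
			if (idx >= size || ++nr_desc > size)
				return -1;	/* out of range, or looping */
		}
		return 0;
	}
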
@@ -265,22 +247,23 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 /**
  * This function adds buffers to the virtio devices RX virtqueue. Buffers can
  * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are succesfully
+ * count is returned to indicate the number of packets that are successfully
  * added to the RX queue. This function works when the mbuf is scattered, but
  * it doesn't support the mergeable feature.
  */
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	      struct rte_mbuf **pkts, uint32_t count)
 {
 	struct vhost_virtqueue *vq;
 	uint16_t avail_idx, free_entries, start_idx;
 	uint16_t desc_indexes[MAX_PKT_BURST];
+	struct vring_desc *descs;
 	uint16_t used_idx;
-	uint32_t i;
+	uint32_t i, sz;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -319,7 +302,23 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		uint16_t desc_idx = desc_indexes[i];
 		int err;
 
-		err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+		if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+			descs = (struct vring_desc *)(uintptr_t)
+				rte_vhost_gpa_to_vva(dev->mem,
+					vq->desc[desc_idx].addr);
+			if (unlikely(!descs)) {
+				count = i;
+				break;
+			}
+
+			desc_idx = 0;
+			sz = vq->desc[desc_idx].len / sizeof(*descs);
+		} else {
+			descs = vq->desc;
+			sz = vq->size;
+		}
+
+		err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
 		if (unlikely(err)) {
 			used_idx = (start_idx + i) & (vq->size - 1);
 			vq->used->ring[used_idx].len = dev->vhost_hlen;
@@ -350,30 +349,42 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	return count;
 }
 
-static inline int __attribute__((always_inline))
-fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
-	     uint32_t *vec_idx, struct buf_vector *buf_vec,
-	     uint16_t *desc_chain_head, uint16_t *desc_chain_len)
+static __rte_always_inline int
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	     uint32_t avail_idx, uint32_t *vec_idx,
+	     struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+	     uint16_t *desc_chain_len)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint32_t vec_id = *vec_idx;
 	uint32_t len = 0;
+	struct vring_desc *descs = vq->desc;
 
 	*desc_chain_head = idx;
+
+	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+		descs = (struct vring_desc *)(uintptr_t)
+			rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+		if (unlikely(!descs))
+			return -1;
+
+		idx = 0;
+	}
+
 	while (1) {
 		if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
 			return -1;
 
-		len += vq->desc[idx].len;
-		buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-		buf_vec[vec_id].buf_len = vq->desc[idx].len;
+		len += descs[idx].len;
+		buf_vec[vec_id].buf_addr = descs[idx].addr;
+		buf_vec[vec_id].buf_len = descs[idx].len;
 		buf_vec[vec_id].desc_idx = idx;
 		vec_id++;
 
-		if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
 			break;
 
-		idx = vq->desc[idx].next;
+		idx = descs[idx].next;
 	}
 
 	*desc_chain_len = len;
@@ -386,11 +397,11 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
-			    struct buf_vector *buf_vec, uint16_t *num_buffers)
+reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			    uint32_t size, struct buf_vector *buf_vec,
+			    uint16_t *num_buffers, uint16_t avail_head)
 {
 	uint16_t cur_idx;
-	uint16_t avail_idx;
 	uint32_t vec_idx = 0;
 	uint16_t tries = 0;
 
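
Both enqueue paths now honor indirect descriptors: a ring entry carrying VRING_DESC_F_INDIRECT does not hold packet data itself but the guest-physical address of a separate, densely packed descriptor table, which lets the guest expose a long chain while occupying a single ring slot. Resolving one therefore means translating that address and switching the walk to the new table, whose chain starts at entry 0. Roughly (illustrative fragment, not taken verbatim from the patch):

	/* Illustrative: switch from the main ring to an indirect table. */
	struct vring_desc *table = vq->desc;	/* default: the ring itself */
	uint32_t nr_entries = vq->size;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		table = (struct vring_desc *)(uintptr_t)
			rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
		if (!table)
			return -1;	/* not backed by guest memory */
		nr_entries = vq->desc[idx].len / sizeof(struct vring_desc);
		idx = 0;		/* indirect chains start at entry 0 */
	}
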
@@ -401,12 +412,11 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
 	cur_idx = vq->last_avail_idx;
 
 	while (size > 0) {
-		avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-		if (unlikely(cur_idx == avail_idx))
+		if (unlikely(cur_idx == avail_head))
 			return -1;
 
-		if (unlikely(fill_vec_buf(vq, cur_idx, &vec_idx, buf_vec,
-					  &head_idx, &len) < 0))
+		if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+					  &head_idx, &len) < 0))
 			return -1;
 		len = RTE_MIN(len, size);
 		update_shadow_used_ring(vq, head_idx, len);
@@ -428,11 +438,10 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
 	return 0;
 }
 
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 			    struct buf_vector *buf_vec, uint16_t num_buffers)
 {
-	struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 	uint32_t vec_idx = 0;
 	uint64_t desc_addr;
 	uint32_t mbuf_offset, mbuf_avail;
@@ -444,7 +453,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 	if (unlikely(m == NULL))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+	desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
 	if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
 		return -1;
 
@@ -453,7 +462,6 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 	hdr_phys_addr = buf_vec[vec_idx].buf_addr;
 	rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
-	virtio_hdr.num_buffers = num_buffers;
 	LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
 		dev->vid, num_buffers);
 
@@ -466,7 +474,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 		/* done with current desc buf, get the next one */
 		if (desc_avail == 0) {
 			vec_idx++;
-			desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+			desc_addr = rte_vhost_gpa_to_vva(dev->mem,
+					buf_vec[vec_idx].buf_addr);
 			if (unlikely(!desc_addr))
 				return -1;
 
@@ -485,8 +494,13 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 		}
 
 		if (hdr_addr) {
-			virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
-			copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
+			struct virtio_net_hdr_mrg_rxbuf *hdr;
+
+			hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
+				hdr_addr;
+			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+			ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
+
 			vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
 			PRINT_PACKET(dev, (uintptr_t)hdr_addr,
 				     dev->vhost_hlen, 0);
@@ -512,7 +526,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
 	return 0;
 }
 
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
 virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mbuf **pkts, uint32_t count)
 {
@@ -520,9 +534,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint32_t pkt_idx = 0;
 	uint16_t num_buffers;
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint16_t avail_head;
 
 	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
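
The avail_head parameter threaded through reserve_avail_buf_mergeable() replaces a volatile read of vq->avail->idx on every loop iteration with a single snapshot per burst (taken in the next hunk). Each volatile read forces a fresh load from a cacheline the guest is concurrently updating; snapshotting once trades a little freshness for far fewer cache misses, and entries the guest publishes after the snapshot are simply picked up by the next burst. The pattern, roughly (illustrative fragment, not the patch itself):

	/* Illustrative: one volatile read per burst, not per packet. */
	uint16_t avail_head = *((volatile uint16_t *)&vq->avail->idx);
	uint16_t cur_idx = vq->last_avail_idx;

	while (cur_idx != avail_head) {
		/* consume the chain headed at
		 * vq->avail->ring[cur_idx & (vq->size - 1)] */
		cur_idx++;
	}
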
@@ -539,11 +554,13 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
 	vq->shadow_used_idx = 0;
+	avail_head = *((volatile uint16_t *)&vq->avail->idx);
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
 		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 
-		if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len, buf_vec,
-							 &num_buffers) < 0)) {
+		if (unlikely(reserve_avail_buf_mergeable(dev, vq,
+						pkt_len, buf_vec, &num_buffers,
+						avail_head) < 0)) {
 			LOG_DEBUG(VHOST_DATA,
 				"(%d) failed to get enough desc from vring\n",
 				dev->vid);
@@ -598,9 +615,11 @@ static inline bool
 virtio_net_with_host_offload(struct virtio_net *dev)
 {
 	if (dev->features &
-		(VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
-		 VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
-		 VIRTIO_NET_F_HOST_UFO))
+		((1ULL << VIRTIO_NET_F_CSUM) |
+		 (1ULL << VIRTIO_NET_F_HOST_ECN) |
+		 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+		 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+		 (1ULL << VIRTIO_NET_F_HOST_UFO)))
 		return true;
 
 	return false;
@@ -631,14 +650,14 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 
 	switch (ethertype) {
 	case ETHER_TYPE_IPv4:
-		ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
+		ipv4_hdr = l3_hdr;
 		*l4_proto = ipv4_hdr->next_proto_id;
 		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
 		m->ol_flags |= PKT_TX_IPV4;
 		break;
 	case ETHER_TYPE_IPv6:
-		ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
+		ipv6_hdr = l3_hdr;
 		*l4_proto = ipv6_hdr->proto;
 		m->l3_len = sizeof(struct ipv6_hdr);
 		*l4_hdr = (char *)l3_hdr + m->l3_len;
@@ -647,11 +666,12 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
 	default:
 		m->l3_len = 0;
 		*l4_proto = 0;
+		*l4_hdr = NULL;
 		break;
 	}
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 {
 	uint16_t l4_proto = 0;
@@ -683,11 +703,11 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
 		}
 	}
 
-	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 		case VIRTIO_NET_HDR_GSO_TCPV6:
-			tcp_hdr = (struct tcp_hdr *)l4_hdr;
+			tcp_hdr = l4_hdr;
 			m->ol_flags |= PKT_TX_TCP_SEG;
 			m->tso_segsz = hdr->gso_size;
 			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
@@ -739,13 +759,13 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
 	return 0;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 put_zmbuf(struct zcopy_mbuf *zmbuf)
 {
 	zmbuf->in_use = 0;
 }
 
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
 copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		  uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
 		  struct rte_mempool *mbuf_pool)
@@ -765,7 +785,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	    (desc->flags & VRING_DESC_F_INDIRECT))
 		return -1;
 
-	desc_addr = gpa_to_vva(dev, desc->addr);
+	desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
 	if (unlikely(!desc_addr))
 		return -1;
 
@@ -785,7 +805,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 		if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 			return -1;
 
-		desc_addr = gpa_to_vva(dev, desc->addr);
+		desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
 		if (unlikely(!desc_addr))
 			return -1;
 
@@ -849,7 +869,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 			if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
 				return -1;
 
-			desc_addr = gpa_to_vva(dev, desc->addr);
+			desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
 			if (unlikely(!desc_addr))
 				return -1;
 
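
Every gpa_to_vva() call above becomes rte_vhost_gpa_to_vva(dev->mem, ...), the translation helper exported by the new public rte_vhost.h API: descriptors carry guest-physical buffer addresses, which must be mapped to host-virtual addresses within the regions the guest registered via VHOST_USER_SET_MEM_TABLE. As of this series the helper is roughly the following region walk, returning 0 when the address is not backed by any registered region (paraphrased sketch, not the verbatim header):

	/* Sketch: translate a guest-physical address to a host-virtual
	 * one by scanning the registered guest memory regions. */
	static inline uint64_t
	gpa_to_vva_sketch(struct rte_vhost_memory *mem, uint64_t gpa)
	{
		uint32_t i;

		for (i = 0; i < mem->nregions; i++) {
			struct rte_vhost_mem_region *reg = &mem->regions[i];

			if (gpa >= reg->guest_phys_addr &&
			    gpa <  reg->guest_phys_addr + reg->size)
				return gpa - reg->guest_phys_addr +
				       reg->host_user_addr;
		}

		return 0;	/* unmapped: callers treat this as an error */
	}
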
@@ -872,6 +892,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 					"allocate memory for mbuf.\n");
 				return -1;
 			}
+			if (unlikely(dev->dequeue_zero_copy))
+				rte_mbuf_refcnt_update(cur, 1);
 
 			prev->next = cur;
 			prev->data_len = mbuf_offset;
@@ -893,7 +915,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
 	return 0;
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		 uint32_t used_idx, uint32_t desc_idx)
 {
@@ -904,7 +926,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			sizeof(vq->used->ring[used_idx]));
 }
 
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
 update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint32_t count)
 {
@@ -924,7 +946,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		eventfd_write(vq->callfd, (eventfd_t)1);
 }
 
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
 get_zmbuf(struct vhost_virtqueue *vq)
 {
 	uint16_t i;
@@ -955,7 +977,7 @@ again:
 	return NULL;
 }
 
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
 mbuf_is_consumed(struct rte_mbuf *m)
 {
 	while (m) {
@@ -984,7 +1006,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (!dev)
 		return 0;
 
-	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
 		return 0;
@@ -1023,9 +1045,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	 * array, to looks like that guest actually send such packet.
 	 *
 	 * Check user_send_rarp() for more information.
+	 *
+	 * broadcast_rarp shares a cacheline in the virtio_net structure
+	 * with some fields that are accessed during enqueue and
+	 * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+	 * result in false sharing between enqueue and dequeue.
+	 *
+	 * Prevent unnecessary false sharing by reading broadcast_rarp first
+	 * and only performing cmpset if the read indicates it is likely to
+	 * be set.
 	 */
-	if (unlikely(rte_atomic16_cmpset((volatile uint16_t *)
-			 &dev->broadcast_rarp.cnt, 1, 0))) {
+
+	if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+			rte_atomic16_cmpset((volatile uint16_t *)
+			&dev->broadcast_rarp.cnt, 1, 0))) {
+
 		rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
 		if (rarp_mbuf == NULL) {
 			RTE_LOG(ERR, VHOST_DATA,
@@ -1080,7 +1114,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
 		if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
-			desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+			desc = (struct vring_desc *)(uintptr_t)
+				rte_vhost_gpa_to_vva(dev->mem,
 					vq->desc[desc_indexes[i]].addr);
			if (unlikely(!desc))
				break;
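
The read-before-cmpset change in rte_vhost_dequeue_burst() is a standard way to avoid false sharing on a flag that is almost always clear: rte_atomic16_read() is a plain load that leaves the cacheline in shared state, while rte_atomic16_cmpset() compiles to a locked cmpxchg that takes the line exclusive and invalidates the enqueue thread's copy even when the flag is already 0. The shape of the pattern (illustrative restatement using the same rte_atomic16 API):

	/* Illustrative: test-and-test-and-set on an rte_atomic16_t flag.
	 * The cheap load filters out the common 'flag clear' case; the
	 * locked cmpxchg runs only when the flag was observed set, and
	 * still arbitrates races between competing readers. */
	if (rte_atomic16_read(&dev->broadcast_rarp) &&
	    rte_atomic16_cmpset((volatile uint16_t *)
				&dev->broadcast_rarp.cnt, 1, 0)) {
		/* we won: inject the RARP packet into this burst */
	}
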